text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Verify that the given soap request is signed with the certificate
<END_TASK>
<USER_TASK:>
Description:
def verify_envelope(reply, key_file):
    """Verify that the given soap request is signed with the certificate"""
    document = etree.fromstring(reply)
    signature = document.find(".//{%s}Signature" % xmlsec.DSigNs)
    if signature is None:
        raise CertificationError("No signature node found")
    # Register 'Id' attributes so reference URIs can be resolved.
    xmlsec.addIDs(document, ['Id'])
    context = xmlsec.DSigCtx()
    key = xmlsec.Key.load(key_file, xmlsec.KeyDataFormatPem)
    key.name = os.path.basename(key_file)
    context.signKey = key
    try:
        context.verify(signature)
    except xmlsec.VerificationError:
        return False
    return True
<SYSTEM_TASK:>
Create the BinarySecurityToken node containing the x509 certificate.
<END_TASK>
<USER_TASK:>
Description:
def create_binary_security_token(key_file):
    """Create the BinarySecurityToken node containing the x509 certificate.
    """
    # Build the token element in the WS-Security namespace.
    token = etree.Element(
        ns_id('BinarySecurityToken', ns.wssens),
        nsmap={ns.wssens[0]: ns.wssens[1]})
    token.set(ns_id('Id', ns.wsuns), get_unique_id())
    token.set('EncodingType', ns.wssns[1] + 'Base64Binary')
    token.set('ValueType', BINARY_TOKEN_TYPE)
    # Read the PEM certificate and embed it as base64-encoded DER.
    with open(key_file) as fh:
        certificate = crypto.load_certificate(crypto.FILETYPE_PEM, fh.read())
    der = crypto.dump_certificate(crypto.FILETYPE_ASN1, certificate)
    token.text = base64.b64encode(der)
    return token
<SYSTEM_TASK:>
Insert a security XML node if it doesn't exist otherwise update it.
<END_TASK>
<USER_TASK:>
Description:
def ensure_security_header(envelope, queue):
    """Insert a security XML node if it doesn't exist otherwise update it.
    """
    (header,) = HEADER_XPATH(envelope)
    existing = SECURITY_XPATH(header)
    if not existing:
        # No security header yet: create a fresh wsse:Security element.
        namespaces = {
            'wsu': ns.wsuns[1],
            'wsse': ns.wssens[1],
        }
        return _create_element(header, 'wsse:Security', namespaces)
    # Reuse the existing node; mark any timestamps it already carries.
    for timestamp in TIMESTAMP_XPATH(existing[0]):
        queue.push_and_mark(timestamp)
    return existing[0]
<SYSTEM_TASK:>
Check the do_x3dna output file and return list of parameters present in the file.
<END_TASK>
<USER_TASK:>
Description:
def checkParametersInputFile(filename):
    """Check the do_x3dna output file and return list of parameters present in the file.

    The parameter set is recognized from the first token of the first two
    lines of the file.  Returns ``None`` when the file is not recognized.
    """
    # Only the first two lines are needed to identify the file type.
    # BUG FIX: use a context manager so the file is closed even on error;
    # str.split() replaces the non-raw regex re.split('\s+', ...).
    with open(filename, 'r') as fin:
        tokens1 = fin.readline().split()
        tokens2 = fin.readline().split()

    tag1 = tokens1[0] if tokens1 else ''
    tag2 = tokens2[0] if tokens2 else ''

    if tag1 == '#Minor':
        return groovesParameters
    if tag1 == '#Shift':
        return baseStepParameters
    if tag1 == '#X-disp':
        return helicalBaseStepParameters
    if tag1 == '#Shear':
        return basePairParameters
    if tag1 == '#Position':
        return helicalAxisParameters
    if tag2 == '#P':
        return helicalRadiusParameters
    if tag2 == '#alpha':
        return backboneDihedrals
    # Unrecognized file: callers treat None as "cannot determine parameters".
    return None
<SYSTEM_TASK:>
Read a specific parameter from the do_x3dna output file.
<END_TASK>
<USER_TASK:>
Description:
def setParametersFromFile(dna, filename, parameters=None, bp=None):
    """Read a specific parameter from the do_x3dna output file.
    It automatically load the input parameter from a file to dna object or HDF5 file.
    It automatically decides from input parameter, what will be format of input file.

    Parameters
    ----------
    dna : :class:`DNA`
        Input :class:`DNA` instance.
    filename : str
        Input filename. This file should be output from do_x3dna.
    parameters : str, list, None
        Name of parameter. For details about accepted keywords, see ``parameter`` in
        the method :meth:`DNA.get_parameters`.
        Note that parameters that are calculated from do_x3dna cannot be used here.
        In case of ``None``, parameter names will be automatically determined from
        the input file.
    bp : list
        List containing lower and higher limit of base-pair/step range.
            * This list should not contain more than two numbers.
            * First number should be less than second number.
        If ``None``, all base-pairs/steps will be considered.

    Raises
    ------
    AssertionError
        If parameter names cannot be determined from the input file.
    ValueError
        If the requested parameters cannot be loaded.
    """
    gotParameterList = False
    param_type = None

    # In case of None, try to determine the parameter names from the file itself.
    if parameters is None:
        parameters = checkParametersInputFile(filename)
    if parameters is None:
        raise AssertionError(" Cannot determine the parameters name from file {0}.".format(filename))

    if isinstance(parameters, (list, np.ndarray)):
        gotParameterList = True
        parameterList = list(parameters)
        param_type = getParameterType(parameterList[0])
    else:
        param_type = getParameterType(parameters)

    # Default to the full base-pair/step range of the DNA object.
    if bp is None:
        if param_type == 'bps':
            bp = [dna.startBP, dna.num_step]
        else:
            bp = [dna.startBP, dna.num_bp]

    # A single entry means one base-pair/step; two entries mean a range.
    bp_range = len(bp) != 1

    if not gotParameterList:
        tempParamName = parameters
        inputParameter = [parameters]
    else:
        tempParamName = parameterList[0]
        inputParameter = parameterList

    sys.stdout.write('\nLoading parameters: {0}'.format(inputParameter))

    # Dispatch to the loader matching the parameter category.
    success = False
    if tempParamName in basePairParameters:
        dna.set_base_pair_parameters(filename, bp, parameters=inputParameter, bp_range=bp_range)
        success = True
    if tempParamName in baseStepParameters:
        dna.set_base_step_parameters(filename, bp, parameters=inputParameter, step_range=bp_range, helical=False)
        success = True
    if tempParamName in helicalBaseStepParameters:
        dna.set_base_step_parameters(filename, bp, parameters=inputParameter, step_range=bp_range, helical=True)
        success = True
    if tempParamName in groovesParameters:
        dna.set_major_minor_groove(filename, bp, parameters=inputParameter, step_range=bp_range)
        success = True
    if tempParamName in backboneDihedrals:
        dna.set_backbone_dihedrals(filename, bp, parameters=inputParameter, bp_range=bp_range)
        success = True
    if tempParamName in helicalRadiusParameters:
        dna.set_helical_radius(filename, bp, full=True, bp_range=bp_range)
        success = True
    if tempParamName in helicalAxisParameters:
        if len(bp) == 1:
            raise AssertionError("Axis cannot be read for a single base-step.\n Use a segment spanned over several basepairs.")
        dna.set_helical_axis(filename, step_range=True, step=bp)
        success = True

    if not success:
        # BUG FIX: the message previously formatted the undefined name
        # ``parameter``, raising NameError when ``parameters`` was a scalar
        # string that matched no category.
        raise ValueError('Not able to load these parameters: {0}... '.format(inputParameter))
<SYSTEM_TASK:>
Read parameters from do_x3dna file.
<END_TASK>
<USER_TASK:>
Description:
def read_param_file(FileName, parameters, bp, bp_range, word=False, startBP=1):
    """ Read parameters from do_x3dna file.
    It is the main function, which is used to read and extract the parameters
    values from the do_x3dna output files.

    Parameters
    ----------
    FileName : str
        Parameter file produced from do_x3dna.
    parameters : list
        List of column indices that has to be extracted. Indices here start
        with one.
    bp : 1D list or array
        Base-pairs to analyze, e.g. ``[6]`` (bp_range=False), ``[4, 15]``
        (bp_range=True), or an explicit list/array of base-pair numbers.
    bp_range : bool
        ``Default=True``: if ``True``, bp is taken as a range otherwise list
        or numpy array.
    word : bool
        In some parameters, in place of value, ``'---'`` is present in the file.
        If parameter values contain this, use ``True``.
    startBP : int
        Number ID of first base-pair.

    Returns
    -------
    data : 3D array
        Extracted parameters as a 3D array of shape (bp, parameters, time).
    time : 1D array
        Time of each frame
    """
    sys.stdout.write("\nReading file : %s\n" % FileName)
    sys.stdout.flush()

    def get_frame_data(block, parameters, bp_idx):
        # Transpose so parameters index rows, then select requested
        # parameter rows and base-pair columns.
        block = np.array(block).T
        temp_data = (block[parameters, :])[:, bp_idx].copy()
        return temp_data

    def get_time(line):
        # Lines look like "# Time = 123.4"
        dummy, temp_time = line.split('=')
        return float(temp_time)

    def report_progress(frame_number):
        # Progress is printed roughly logarithmically: every 10th frame
        # below 100, every 100th below 1000, and so on.
        for limit, step in ((100, 10), (1000, 100), (10000, 1000),
                            (100000, 10000), (1000000, 100000)):
            if frame_number < limit:
                if frame_number % step == 0:
                    sys.stdout.write("\rReading frame %d" % frame_number)
                    sys.stdout.flush()
                break

    data = []
    time = []
    frame_number = 0
    bp_idx, param_idx = get_idx_of_bp_parameters(bp, parameters, bp_range, startBP=startBP)
    block = []
    # BUG FIX: the file is now closed via a context manager, and the token
    # loop below no longer shadows the ``word`` argument (previously the
    # first worded line rebound ``word`` to a string for the rest of the run).
    with open(FileName, 'r') as infile:
        for line in infile:
            # Removing last new line character
            line = line.rstrip('\n')
            # Skipping blank/empty line
            if not line.strip():
                continue
            # Getting Time tag and time => Starting of new frame
            if re.match('# Time', line) is not None:
                report_progress(frame_number)
                frame_number += 1
                # Getting time
                time.append(get_time(line))
                # Getting parameters/values for base-pairs of previous frame
                if len(block) > 0:
                    data.append(get_frame_data(block, param_idx, bp_idx))
                    block = []
                continue
            # Skipping other lines starting with '#' tag
            if re.match('#', line) is not None:
                continue
            if not word:
                block.append(list(map(float, line.split())))
            else:
                # '---' marks a missing value in worded files
                block.append([float(token) if token != '---' else None
                              for token in line.split()])
    # For last frame (guarded so an empty/truncated file does not crash)
    if len(block) > 0:
        data.append(get_frame_data(block, param_idx, bp_idx))
        block = []
    data_transpose = np.array(data).T
    sys.stdout.write(
        "\nFinished reading.... Total number of frame read = %d\n" % frame_number)
    sys.stdout.flush()
    return data_transpose, time
<SYSTEM_TASK:>
Set time in both class and hdf5 file
<END_TASK>
<USER_TASK:>
Description:
def _set_time(self, time):
    """ Set time in both class and hdf5 file
    """
    if len(self.time) != 0:
        # Time was already set by a previous file: lengths must agree.
        if len(time) != len(self.time):
            raise AssertionError("\nTime or number of frame mismatch in input files.\n Exiting...\n")
        return
    # First file seen: record the time axis (and persist it when backed by HDF5).
    self.time = np.array(time)
    if self.h5 is not None:
        self.h5.create_dataset(
            'time', self.time.shape, dtype=self.time.dtype, data=self.time,
            compression="gzip", shuffle=True, scaleoffset=3)
<SYSTEM_TASK:>
Set mask array in both class and hdf5 file
<END_TASK>
<USER_TASK:>
Description:
def _set_mask(self, mask):
    """ Set mask array in both class and hdf5 file
    """
    # Keep an independent copy so later mutation by the caller cannot leak in.
    self.mask = mask.copy()
    if self.h5 is None:
        return
    # Replace any previously stored mask dataset.
    if 'mask' in self.h5:
        self.h5.pop('mask')
    self.h5.create_dataset('mask', mask.shape, dtype=self.mask.dtype,
                           data=mask, compression="gzip", shuffle=True)
<SYSTEM_TASK:>
To read and set local helical-axis positions from an input file.
<END_TASK>
<USER_TASK:>
Description:
def set_helical_axis(self, filename, step_range=False, step=None):
    """
    To read and set local helical-axis positions from an input file.

    Parameters
    ----------
    filename : str
        Input file, which is generated from do_x3dna. e.g. HelAxis_g.dat
    step_range : bool
        * ``step_range = True`` : read axis coordinates of base-steps for the given range of base-steps
        * ``step_range = False``: read axis coordinates of all base-steps
    step : list
        List containing lower and higher limit of base-steps range.
            * This option only works with ``step_range=True``.
            * This list should not contain more than two numbers.
            * First number should be less than second number.
        Example for base-step 4 to 15:
            ``step = [4,15] # step_range = True``
    """
    if step_range:
        if not isinstance(step, list):
            raise AssertionError("type %s is not list" % type(step))
        if len(step) > 2:
            # BUG FIX: previously printed an error and called exit(1);
            # raising (like the sibling checks below) lets callers handle it.
            raise ValueError(
                "Range for helical axis should be list of two numbers, e.g. step=[1, 20]")
    if step_range and step is None:
        raise ValueError(
            "See, documentation for step and step_range usage!!!")

    # Check if requested parameters found within input file
    gotParametersInputFile = checkParametersInputFile(filename)
    if gotParametersInputFile is None:
        raise IOError(' Something wrong in input file {0}.\n Cannot read parameters.\n File should be an output from do_x3dna.'.format(filename))
    for p in helicalAxisParameters:
        if p not in gotParametersInputFile:
            raise ValueError(' Helical axis not found in input file. \n This file contains following parameters: \n {0}'.format(gotParametersInputFile))

    targetParameters = { 1:'helical x-axis', 2:'helical y-axis', 3:'helical z-axis' }

    if step_range:
        if len(step) != 2:
            raise ValueError("See, documentation for step usage!!!")
        if step[0] > step[1]:
            raise ValueError("See, documentation for step usage!!!")
        data, time = read_param_file(filename, [1, 2, 3], step, True, startBP=self.startBP)
    else:
        data, time = read_param_file(filename, [1, 2, 3], [1, self.num_step], True, startBP=self.startBP)

    self._set_time(time)

    if step_range:
        bp_idx, param_idx = get_idx_of_bp_parameters(step, [], True, startBP=self.startBP)
    else:
        bp_idx, param_idx = get_idx_of_bp_parameters([1, self.num_step], [], True, startBP=self.startBP)

    # Store each axis component (x, y, z) per base-step.
    for i in range(len(data)):
        for j in range(len(data[i])):
            bp_num = str(bp_idx[i] + self.startBP)
            param = targetParameters[j + 1]
            self._set_data(data[i][j], 'bps', bp_num, param, scaleoffset=2)
<SYSTEM_TASK:>
Indicates whether the playback should loop.
<END_TASK>
<USER_TASK:>
Description:
def loop(self, value):
    """ Indicates whether the playback should loop.

    Parameters
    ----------
    value : bool
        True if playback should loop, False if not.

    Raises
    ------
    TypeError
        If value is not a bool.
    """
    # isinstance is the idiomatic check; since bool cannot be subclassed
    # this accepts exactly the same values as the old ``type(value) == bool``.
    if not isinstance(value, bool):
        raise TypeError("can only be True or False")
    self._loop = value
<SYSTEM_TASK:>
Resets the player and discards loaded data.
<END_TASK>
<USER_TASK:>
Description:
def reset(self):
    """ Resets the player and discards loaded data. """
    # Drop all references to the previously loaded clip and its timing info.
    for attr in ('clip', 'loaded_file', 'fps', 'duration'):
        setattr(self, attr, None)
    self.loop_count = 0
    # Back to the pristine state; the clock is cleared as well.
    self.status = UNINITIALIZED
    self.clock.reset()
<SYSTEM_TASK:>
Loads a media file to decode.
<END_TASK>
<USER_TASK:>
Description:
def load_media(self, mediafile, play_audio=True):
    """ Loads a media file to decode.

    If an audiostream is detected, its parameters will be stored in a
    dictionary in the variable `audioformat`. This contains the fields

    :nbytes: the number of bytes in the stream (2 is 16-bit sound).
    :nchannels: the channels (2 for stereo, 1 for mono)
    :fps: the frames per sec/sampling rate of the sound (e.g. 44100 KhZ).
    :buffersize: the audioframes per buffer.

    If play_audio was set to False, or the video does not have an audiotrack,
    `audioformat` will be None.

    Parameters
    ----------
    mediafile : str
        The path to the media file to load.
    play_audio : bool, optional
        Indicates whether the audio of a movie should be played.

    Returns
    -------
    bool
        True when the file was loaded; False when mediafile was None.

    Raises
    ------
    IOError
        When the file could not be found or loaded.
    """
    # Guard clauses replace the old nested ``if not mediafile is None`` shape.
    if mediafile is None:
        return False
    if not os.path.isfile(mediafile):
        raise IOError("File not found: {0}".format(mediafile))

    self.clip = VideoFileClip(mediafile, audio=play_audio)
    self.loaded_file = os.path.split(mediafile)[1]

    ## Timing variables
    # Clip duration
    self.duration = self.clip.duration
    self.clock.max_duration = self.clip.duration
    logger.debug("Video clip duration: {}s".format(self.duration))

    # Frames per second of clip
    self.fps = self.clip.fps
    self.clock.fps = self.clip.fps
    logger.debug("Video clip FPS: {}".format(self.fps))

    if play_audio and self.clip.audio:
        buffersize = int(self.frame_interval * self.clip.audio.fps)
        self.audioformat = {
            'nbytes': 2,
            'nchannels': self.clip.audio.nchannels,
            'fps': self.clip.audio.fps,
            'buffersize': buffersize
        }
        logger.debug("Audio loaded: \n{}".format(self.audioformat))
        # NOTE(review): ``queue_length`` appears to be a module-level
        # setting defined elsewhere in this module — confirm.
        logger.debug("Creating audio buffer of length: "
            " {}".format(queue_length))
        self.audioqueue = Queue(queue_length)
    else:
        self.audioformat = None

    logger.debug('Loaded {0}'.format(mediafile))
    self.status = READY
    return True
<SYSTEM_TASK:>
Sets the SoundRenderer object. This should take care of processing
<END_TASK>
<USER_TASK:>
Description:
def set_audiorenderer(self, renderer):
    """ Sets the SoundRenderer object. This should take care of processing
    the audioframes set in audioqueue.

    Parameters
    ----------
    renderer : soundrenderers.SoundRenderer
        A subclass of soundrenderers.SoundRenderer that takes care of the
        audio rendering.

    Raises
    ------
    RuntimeError
        If no information about the audiostream is available. This could be
        because no video has been loaded yet, or because no embedded
        audiostream could be detected in the video, or play_sound was set
        to False.
    TypeError
        If renderer is not a SoundRenderer subclass instance.
    """
    audioqueue = getattr(self, 'audioqueue', None)
    if audioqueue is None:
        raise RuntimeError("No video has been loaded, or no audiostream "
                           "was detected.")
    if not isinstance(renderer, SoundRenderer):
        raise TypeError("Invalid renderer object. Not a subclass of "
                        "SoundRenderer")
    # Hand the renderer the queue it should drain.
    self.soundrenderer = renderer
    self.soundrenderer.queue = audioqueue
<SYSTEM_TASK:>
Stops the video stream and resets the clock.
<END_TASK>
<USER_TASK:>
Description:
def stop(self):
    """ Stops the video stream and resets the clock. """
    logger.debug("Stopping playback")
    self.clock.stop()    # halt the internal clock
    self.status = READY  # player can be started again afterwards
<SYSTEM_TASK:>
Seek to the specified time.
<END_TASK>
<USER_TASK:>
Description:
def seek(self, value):
    """ Seek to the specified time.

    Parameters
    ----------
    value : str or int
        The time to seek to. Can be any of the following formats:

        >>> 15.4 -> 15.4 # seconds
        >>> (1,21.5) -> 81.5 # (min,sec)
        >>> (1,1,2) -> 3662 # (hr, min, sec)
        >>> '01:01:33.5' -> 3693.5 #(hr,min,sec)
        >>> '01:01:33.045' -> 3693.045
        >>> '01:01:33,5' #comma works too

        NOTE(review): the ``max(0.5, value)`` below only works for numeric
        values; the str/tuple formats listed above would raise TypeError
        here — confirm whether conversion happens upstream (e.g. in clock).
    """
    # Pause the stream
    self.pause()
    # Clamp to 0.5 s: seeking to exactly 0 s gives trouble.
    self.clock.time = max(0.5, value)
    logger.debug("Seeking to {} seconds; frame {}".format(self.clock.time,
        self.clock.current_frame))
    # Realign the audio chunk list with the new position.
    if self.audioformat:
        self.__calculate_audio_frames()
    # Resume the stream (presumably pause() toggles pause/resume — confirm).
    self.pause()
<SYSTEM_TASK:>
Aligns audio with video.
<END_TASK>
<USER_TASK:>
Description:
def __calculate_audio_frames(self):
    """ Aligns audio with video.
    This should be called for instance after a seeking operation or resuming
    from a pause. """
    if self.audioformat is None:
        return
    frames_total = int(self.clip.audio.fps * self.clip.audio.duration)
    chunksize = self.audioformat['buffersize']
    # Chunk boundaries across the whole stream, ending exactly at its end.
    boundaries = list(range(0, frames_total, chunksize))
    boundaries.append(frames_total)
    # Discard the segments that lie before the current playback position.
    self.audio_times = boundaries[self.clock.current_frame:]
<SYSTEM_TASK:>
Main render loop.
<END_TASK>
<USER_TASK:>
Description:
def __render(self):
    """ Main render loop.

    Checks clock if new video and audio frames need to be rendered.
    If so, it passes the frames to functions that take care
    of rendering these frames. """
    # Render first frame immediately so something is on screen
    self.__render_videoframe()
    # Start videoclock with start of this thread
    self.clock.start()
    logger.debug("Started rendering loop.")
    # Main rendering loop; runs until playback stops or the stream ends
    while self.status in [PLAYING,PAUSED]:
        current_frame_no = self.clock.current_frame
        # Check if end of clip has been reached
        if self.clock.time >= self.duration:
            logger.debug("End of stream reached at {}".format(self.clock.time))
            if self.loop:
                logger.debug("Looping: restarting stream")
                # Seek to the start
                self.rewind()
                self.loop_count += 1
            else:
                # End of stream has been reached
                self.status = EOS
                break
        if self.last_frame_no != current_frame_no:
            # A new frame is available. Get it from the stream
            self.__render_videoframe()
            self.last_frame_no = current_frame_no
        # Sleeping is a good idea to give the other threads some breathing
        # space to do their work.
        time.sleep(0.005)
    # Stop the clock.
    self.clock.stop()
    logger.debug("Rendering stopped.")
<SYSTEM_TASK:>
Retrieves a new videoframe from the stream.
<END_TASK>
<USER_TASK:>
Description:
def __render_videoframe(self):
    """ Retrieves a new videoframe from the stream.

    Sets the frame as the __current_video_frame and passes it on to
    __videorenderfunc() if it is set. """
    frame = self.clip.get_frame(self.clock.time)
    # Hand the frame to the render callback, when one has been registered.
    callback = self.__videorenderfunc
    if callable(callback):
        callback(frame)
    # Keep a reference to the most recently decoded frame.
    self.__current_videoframe = frame
<SYSTEM_TASK:>
Thread that takes care of the audio rendering. Do not call directly,
<END_TASK>
<USER_TASK:>
Description:
def __audiorender_thread(self):
    """ Thread that takes care of the audio rendering. Do not call directly,
    but only as the target of a thread. """
    # Holds a decoded chunk that could not yet be queued (retried below).
    new_audioframe = None
    logger.debug("Started audio rendering thread.")
    while self.status in [PLAYING,PAUSED]:
        # Retrieve audiochunk
        if self.status == PLAYING:
            if new_audioframe is None:
                # Get a new frame from the audiostream, skip to the next one
                # if the current one gives a problem
                try:
                    start = self.audio_times.pop(0)
                    stop = self.audio_times[0]
                except IndexError:
                    # No more chunk boundaries available; wait and retry.
                    logger.debug("Audio times could not be obtained")
                    time.sleep(0.02)
                    continue
                # Get the frame numbers to extract from the audio stream.
                chunk = (1.0/self.audioformat['fps'])*np.arange(start, stop)
                try:
                    # Extract the frames from the audio stream. Does not always,
                    # succeed (e.g. with bad streams missing frames), so make
                    # sure this doesn't crash the whole program.
                    new_audioframe = self.clip.audio.to_soundarray(
                        tt = chunk,
                        buffersize = self.frame_interval*self.clip.audio.fps,
                        quantize=True
                    )
                except OSError as e:
                    logger.warning("Sound decoding error: {}".format(e))
                    new_audioframe = None
        # Put audioframe in buffer/queue for soundrenderer to pick up. If
        # the queue is full, try again after a timeout (this allows to check
        # if the status is still PLAYING after a pause.)
        if not new_audioframe is None:
            try:
                self.audioqueue.put(new_audioframe, timeout=.05)
                new_audioframe = None
            except Full:
                # Queue full: keep the frame and retry on the next iteration.
                pass
        time.sleep(0.005)
    logger.debug("Stopped audio rendering thread.")
<SYSTEM_TASK:>
Renders the given layout manager on a page of the given context.
<END_TASK>
<USER_TASK:>
Description:
def render_to_cairo_context(cairo_context, papersize_tuple, layout):
    """Renders the given layout manager on a page of the given context.

    Assumes the given context has not yet been reversed in the y-direction
    (i.e. it is still the default for Cairo, where y increases up
    from the bottom of the page). This method performs the reversal and
    resets it before it returns.
    """
    # BUG FIX: save() must happen *before* the try block — if save() itself
    # failed, the old code would still call restore() for a state that was
    # never pushed.
    cairo_context.save()
    try:
        # Flip the y-axis so the layout renders top-down.
        cairo_context.translate(0, papersize_tuple[1])
        cairo_context.scale(1, -1)
        layout.render(
            Rectangle(0, 0, *papersize_tuple),
            dict(output=CairoOutput(cairo_context))
        )
    finally:
        cairo_context.restore()
<SYSTEM_TASK:>
Returns the UTF-8 encoded title
<END_TASK>
<USER_TASK:>
Description:
def title(self):
    """Returns the UTF-8 encoded title"""
    # Title is "[<current directory name>] <command line>>>", UTF-8 encoded.
    directory = os.path.split(os.path.abspath('.'))[-1]
    command_line = u' '.join(self.command)
    return u'[{}] {}>>'.format(directory, command_line).encode('utf8')
<SYSTEM_TASK:>
Perform the query and calculate the time range based on the relative values.
<END_TASK>
<USER_TASK:>
Description:
def query_relative(self, query, event_time=None, relative_duration_before=None, relative_duration_after=None):
    """Perform the query and calculate the time range based on the relative values."""
    assert event_time is None or isinstance(event_time, datetime.datetime)
    assert relative_duration_before is None or isinstance(relative_duration_before, str)
    assert relative_duration_after is None or isinstance(relative_duration_after, str)

    # Default the reference point to "now".
    if event_time is None:
        event_time = datetime.datetime.now()

    # Fall back on the instance-level default durations.
    before = relative_duration_before if relative_duration_before is not None else self.relative_duration_before
    after = relative_duration_after if relative_duration_after is not None else self.relative_duration_after

    time_start = event_time - create_timedelta(before)
    time_end = event_time + create_timedelta(after)
    return self.query_with_time(query, time_start, time_end)
<SYSTEM_TASK:>
Returns the search results as a list of JSON objects.
<END_TASK>
<USER_TASK:>
Description:
def json(self):
    """Returns the search results as a list of JSON objects."""
    if self.search_results is None:
        return None
    fields = self.search_results['fields']
    # Pair every row value with its corresponding field name.
    return [
        {field: row[index] for index, field in enumerate(fields)}
        for row in self.search_results['rows']
    ]
<SYSTEM_TASK:>
Draws the given text at x,y.
<END_TASK>
<USER_TASK:>
Description:
def draw_text(self, text:str, x:float, y:float, *,
        font_name:str, font_size:float, fill:Color) -> None:
    """Draws the given text at x,y.

    No-op in this base class; concrete output backends override it.
    """
    pass
<SYSTEM_TASK:>
Draws the given line.
<END_TASK>
<USER_TASK:>
Description:
def draw_line(
        self, x0:float, y0:float, x1:float, y1:float, *,
        stroke:Color,
        stroke_width:float=1,
        stroke_dash:typing.Sequence=None
        ) -> None:
    """Draws the given line.

    No-op in this base class; concrete output backends override it.
    """
    pass
<SYSTEM_TASK:>
Draws the given rectangle.
<END_TASK>
<USER_TASK:>
Description:
def draw_rect(
        self, x:float, y:float, w:float, h:float, *,
        stroke:Color=None,
        stroke_width:float=1,
        stroke_dash:typing.Sequence=None,
        fill:Color=None
        ) -> None:
    """Draws the given rectangle.

    No-op in this base class; concrete output backends override it.
    """
    pass
<SYSTEM_TASK:>
Draws the given linear path.
<END_TASK>
<USER_TASK:>
Description:
def draw_polygon(
        self,
        *pts,
        close_path:bool=True,
        stroke:Color=None,
        stroke_width:float=1,
        stroke_dash:typing.Sequence=None,
        fill:Color=None
        ) -> None:
    """Draws the given linear path.

    No-op in this base class; concrete output backends override it.
    """
    pass
<SYSTEM_TASK:>
Clip further output to this rect.
<END_TASK>
<USER_TASK:>
Description:
def clip_rect(self, x:float, y:float, w:float, h:float) -> None:
    """Clip further output to this rect.

    No-op in this base class; concrete output backends override it.
    """
    pass
<SYSTEM_TASK:>
List of file names included by the MANIFEST.in template lines.
<END_TASK>
<USER_TASK:>
Description:
def parse_manifest(template_lines):
    """List of file names included by the MANIFEST.in template lines."""
    # NOTE(review): distutils was removed from the stdlib in Python 3.12;
    # consider setuptools' vendored FileList for newer interpreters.
    file_list = distutils.filelist.FileList()
    for template_line in template_lines:
        # Blank lines carry no directives and are skipped.
        if not template_line.strip():
            continue
        file_list.process_template_line(template_line)
    return file_list.files
<SYSTEM_TASK:>
Patch the wxPython Classic class to behave like a wxPython
<END_TASK>
<USER_TASK:>
Description:
def _wx_two_step_creation_on_classic(cls):
    """
    Patch the wxPython Classic class to behave like a wxPython
    Phoenix class on a 2-step creation process.

    On wxPython Phoenix, the first step is the parameterless
    ``__init__``, and the second step is the ``Create`` method with
    the construction parameters, e.g.::

        class CustomFrame(wx.Frame):
            def __init__(self, parent):
                super(CustomFrame, self).__init__()  # 1st step
                # [...]
                self.Create(parent)  # 2nd step
                # [...]

    On wxPython Classic, the same would be written as::

        class CustomFrame(wx.Frame):
            def __init__(self, parent):
                pre = wx.PreFrame()  # 1st step
                # [... using "pre" instead of "self" ...]
                pre.Create(parent)  # 2nd step
                self.PostCreate(pre)  # "3rd step"
                # [...]
    """
    # Keep references to the unpatched methods; the wrappers below delegate
    # to them.
    cls_init = cls.__init__
    cls_create = cls.Create
    @functools.wraps(cls_init)
    def __init__(self, *args, **kwargs):
        if args or kwargs:
            # Classic single-step construction: pass straight through.
            cls_init(self, *args, **kwargs)
        else: # 2-step creation
            # Emulate Phoenix: build the Classic "pre" object (wx.PreFrame,
            # wx.PrePanel, ...) and copy its attributes onto self.
            new_self = getattr(wx, "Pre" + cls.__name__)()
            for pair in vars(new_self).items():
                setattr(self, *pair)
    if sys.platform == "win32":
        # On Windows, the wx.Pre*.Create constructors calls the
        # EVT_WINDOW_CREATE handler before returning (i.e, it processes
        # the event instead of just adding a message to the queue), and
        # that shouldn't happen before the PostCreate call in this thread
        @functools.wraps(cls_create)
        def create(self, *args, **kwargs):
            # Suppress event handling during Create so EVT_WINDOW_CREATE is
            # not processed before PostCreate; it is re-posted afterwards.
            self.SetEvtHandlerEnabled(False)
            result = cls_create(self, *args, **kwargs)
            self.SetEvtHandlerEnabled(True)
            if result:
                # NOTE(review): PostCreate is called with ``self`` rather
                # than a separate "pre" object — confirm this is intended.
                self.PostCreate(self)
                wx.PostEvent(self, wx.WindowCreateEvent(self))
            return result
    else:
        @functools.wraps(cls_create)
        def create(self, *args, **kwargs):
            result = cls_create(self, *args, **kwargs)
            if result:
                self.PostCreate(self)
            return result
    # Install the wrappers on the class.
    cls.__init__ = __init__
    cls.Create = create
<SYSTEM_TASK:>
Start a running pipeline. The API waits for the pipeline to be fully started.
<END_TASK>
<USER_TASK:>
Description:
def start_pipeline(url, pipeline_id, auth, verify_ssl, runtime_parameters=None):
    """Start a running pipeline. The API waits for the pipeline to be fully started.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of of the exported pipeline.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates
        runtime_parameters (dict): the desired runtime parameters for the
            pipeline; defaults to an empty dict.

    Returns:
        dict: the response json
    """
    # BUG FIX: the default used to be a mutable ``{}`` shared between calls;
    # use the None sentinel instead.
    if runtime_parameters is None:
        runtime_parameters = {}
    start_result = requests.post(url + '/' + pipeline_id + '/start',
                                 headers=X_REQ_BY, auth=auth, verify=verify_ssl, json=runtime_parameters)
    start_result.raise_for_status()
    logging.info('Pipeline start requested.')
    # Block until the pipeline reports RUNNING.
    poll_pipeline_status(STATUS_RUNNING, url, pipeline_id, auth, verify_ssl)
    logging.info("Pipeline started.")
    return start_result.json()
<SYSTEM_TASK:>
Export the config and rules for a pipeline.
<END_TASK>
<USER_TASK:>
Description:
def export_pipeline(url, pipeline_id, auth, verify_ssl):
    """Export the config and rules for a pipeline.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of of the exported pipeline.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates

    Returns:
        dict: the response json
    """
    endpoint = '{0}/{1}/export'.format(url, pipeline_id)
    export_result = requests.get(endpoint, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    # Log a clearer message for the common "wrong id" case before raising.
    if export_result.status_code == 404:
        logging.error('Pipeline not found: ' + pipeline_id)
    export_result.raise_for_status()
    return export_result.json()
<SYSTEM_TASK:>
Retrieve the current status for a pipeline.
<END_TASK>
<USER_TASK:>
Description:
def pipeline_status(url, pipeline_id, auth, verify_ssl):
    """Retrieve the current status for a pipeline.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of of the exported pipeline.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates

    Returns:
        dict: the response json
    """
    endpoint = '{0}/{1}/status'.format(url, pipeline_id)
    status_result = requests.get(endpoint, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    status_result.raise_for_status()
    logging.debug('Status request: ' + url + '/status')
    logging.debug(status_result.json())
    return status_result.json()
<SYSTEM_TASK:>
Retrieve the current status for a preview.
<END_TASK>
<USER_TASK:>
Description:
def preview_status(url, pipeline_id, previewer_id, auth, verify_ssl):
    """Retrieve the current status for a preview.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of of the exported pipeline.
        previewer_id (str): the previewer id created by starting a preview or validation
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates

    Returns:
        dict: the response json
    """
    endpoint = '{0}/{1}/preview/{2}/status'.format(url, pipeline_id, previewer_id)
    preview_status = requests.get(endpoint, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    preview_status.raise_for_status()
    logging.debug(preview_status.json())
    return preview_status.json()
<SYSTEM_TASK:>
Stop a running pipeline. The API waits for the pipeline to be 'STOPPED' before returning.
<END_TASK>
<USER_TASK:>
Description:
def stop_pipeline(url, pipeline_id, auth, verify_ssl):
    """Stop a running pipeline. The API waits for the pipeline to be 'STOPPED' before returning.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of of the exported pipeline.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates

    Returns:
        dict: the response json
    """
    endpoint = '{0}/{1}/stop'.format(url, pipeline_id)
    stop_result = requests.post(endpoint, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    stop_result.raise_for_status()
    logging.info("Pipeline stop requested.")
    # Block until the pipeline reports STOPPED.
    poll_pipeline_status(STATUS_STOPPED, url, pipeline_id, auth, verify_ssl)
    logging.info('Pipeline stopped.')
    return stop_result.json()
<SYSTEM_TASK:>
Validate a pipeline and show issues.
<END_TASK>
<USER_TASK:>
Description:
def validate_pipeline(url, pipeline_id, auth, verify_ssl):
    """Validate a pipeline and show issues.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of of the exported pipeline.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates

    Returns:
        dict: the response json
    """
    validate_result = requests.get('{0}/{1}/validate'.format(url, pipeline_id),
                                   headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    validate_result.raise_for_status()
    previewer_id = validate_result.json()['previewerId']
    # Wait for the validation run to complete, then fetch its result.
    poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl)
    preview_result = requests.get('{0}/{1}/preview/{2}'.format(url, pipeline_id, previewer_id),
                                  headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    logging.debug('result content: {}'.format(preview_result.content))
    return preview_result.json()
<SYSTEM_TASK:>
Import a pipeline.
<END_TASK>
<USER_TASK:>
Description:
def import_pipeline(url, pipeline_id, auth, json_payload, verify_ssl, overwrite = False):
"""Import a pipeline.
This will completely overwrite the existing pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
json_payload (dict): the exported json payload as a dictionary.
overwrite (bool): overwrite existing pipeline
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
""" |
parameters = { 'overwrite' : overwrite }
import_result = requests.post(url + '/' + pipeline_id + '/import', params=parameters,
headers=X_REQ_BY, auth=auth, verify=verify_ssl, json=json_payload)
if import_result.status_code != 200:
logging.error('Import error response: ' + import_result.text)
import_result.raise_for_status()
logging.info('Pipeline import successful.')
return import_result.json() |
<SYSTEM_TASK:>
Create a new pipeline.
<END_TASK>
<USER_TASK:>
Description:
def create_pipeline(url, auth, json_payload, verify_ssl):
"""Create a new pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
auth (tuple): a tuple of username, and password.
json_payload (dict): the exported json paylod as a dictionary.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
""" |
title = json_payload['pipelineConfig']['title']
description = json_payload['pipelineConfig']['description']
params = {'description':description, 'autoGeneratePipelineId':True}
logging.info('No destination pipeline ID provided. Creating a new pipeline: ' + title)
put_result = requests.put(url + '/' + title, params=params, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
put_result.raise_for_status()
create_json = put_result.json()
logging.debug(create_json)
logging.info('Pipeline creation successful.')
return create_json |
<SYSTEM_TASK:>
Import a pipeline from json.
<END_TASK>
<USER_TASK:>
Description:
def import_pipeline(conf, args):
"""Import a pipeline from json.""" |
with open(args.pipeline_json) as pipeline_json:
dst = conf.config['instances'][args.dst_instance]
dst_url = api.build_pipeline_url(build_instance_url(dst))
dst_auth = tuple([conf.creds['instances'][args.dst_instance]['user'],
conf.creds['instances'][args.dst_instance]['pass']])
parsed_json = json.load(pipeline_json)
verify_ssl = dst.get('verify_ssl', True)
return api.import_pipeline(dst_url, args.pipeline_id, dst_auth, parsed_json, verify_ssl, overwrite=args.overwrite) |
<SYSTEM_TASK:>
Export a pipeline from a lower environment and import into higher environment.
<END_TASK>
<USER_TASK:>
Description:
def promote_pipeline(conf, args):
"""Export a pipeline from a lower environment and import into higher environment.""" |
src = conf.config['instances'][args.src_instance]
src_url = api.build_pipeline_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src_instance]['user'], conf.creds['instances'][args.src_instance]['pass']])
verify_ssl = src.get('verify_ssl', True)
export_json = api.export_pipeline(src_url, args.src_pipeline_id, src_auth, verify_ssl)
# Import the pipeline to the destination
dest = conf.config['instances'][args.dest_instance]
dest_url = api.build_pipeline_url(build_instance_url(dest))
dest_auth = tuple([conf.creds['instances'][args.dest_instance]['user'], conf.creds['instances'][args.dest_instance]['pass']])
dest_pipeline_id = args.dest_pipeline_id
if dest_pipeline_id and api.pipeline_status(dest_url, dest_pipeline_id, dest_auth, verify_ssl)['status'] != api.STATUS_STOPPED:
api.stop_pipeline(dest_url, dest_pipeline_id, dest_auth, verify_ssl)
else:
# No destination pipeline id was provided, must be a new pipeline.
create_json = api.create_pipeline(dest_url, dest_auth, export_json, verify_ssl)
dest_pipeline_id = create_json['info']['pipelineId']
result = api.import_pipeline(dest_url, dest_pipeline_id, dest_auth, export_json, verify_ssl, overwrite=True)
# Start the imported pipeline
if args.start_dest:
api.start_pipeline(dest_url, dest_pipeline_id, dest_auth, verify_ssl)
return result |
<SYSTEM_TASK:>
Extract the parameters for calculations
<END_TASK>
<USER_TASK:>
Description:
def extractGlobalParameters(self, dna, bp, frames=None, paxis='Z', masked=False):
    """Extract the parameters for calculations

    .. currentmodule:: dnaMD

    Parameters
    ----------
    dna : :class:`dnaMD.DNA`
        Input :class:`dnaMD.DNA` instance.
    bp : list
        List of two base-steps forming the DNA segment.
        For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
    frames : list
        List of two trajectory frames between which parameters will be extracted. It can be used to select portions
        of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
        considered.
    paxis : str
        Axis parallel to global helical-axis(``'X'``, or ``'Y'`` or ``'Z'``). Only require when bending motions are
        included in the calculation.
    masked : bool
        ``Default=False``. To skip specific frames/snapshots.
        ``DNA.mask`` array should be set to use this functionality.
        This array contains boolean (either ``True`` or ``False``) value
        for each frame to mask the frames. Presently, mask array is
        automatically generated during :meth:`dnaMD.DNA.generate_smooth_axis` to
        skip those frames where 3D fitting curve was not successful within
        the given criteria.

    Returns
    -------
    time : numpy.ndarray
        1D numpy array of shape (nframes) containing time
    array : numpy.ndarray
        2D numpy array of shape (parameters count, nframes) containing extracted parameters.
    """ |
    # Normalize the frame selection: a -1 end-frame means "up to the last
    # frame", which as a slice end must be None.
    frames = self._validateFrames(frames)
    if frames[1] == -1:
        frames[1] = None

    # bp must be exactly [first, last] with first <= last.
    if (len(bp) != 2):
        raise ValueError("bp should be a list containing first and last bp of a segment. See, documentation!!!")

    if bp[0] > bp[1]:
        raise ValueError("bp should be a list containing first and last bp of a segment. See, documentation!!!")

    # Contour length: helical rise summed over the segment, Angstrom -> nm.
    time, clen = dna.time_vs_parameter('h-rise', bp=bp, merge=True, merge_method='sum', masked=masked)
    clen = np.asarray(clen) * 0.1  # conversion to nm

    # Total helical twist over the segment, degree -> radian.
    time, htwist = dna.time_vs_parameter('h-twist', bp=bp, merge=True, merge_method='sum', masked=masked)
    htwist = np.deg2rad(htwist)  # Conversion to radian

    angleOne, angleTwo = None, None
    if self.esType == 'BST':
        # Bending angles w.r.t. the global helical axis are only needed when
        # bending motions are included (esType 'BST').
        angleOne, angleTwo = dna.calculate_2D_angles_bw_tangents(paxis, bp, masked=masked)

        # Rarely there are nan during angle calculation, remove those nan
        nanInOne = np.isnan(angleOne[frames[0]:frames[1]])
        nanInTwo = np.isnan(angleTwo[frames[0]:frames[1]])
        notNan = ~(nanInOne + nanInTwo)  # True only where BOTH angles are valid
        notNanIdx = np.nonzero(notNan)

        array = np.array([angleOne[frames[0]:frames[1]][notNanIdx], angleTwo[frames[0]:frames[1]][notNanIdx],
                          clen[frames[0]:frames[1]][notNanIdx], htwist[frames[0]:frames[1]][notNanIdx]])
        time = (time[frames[0]:frames[1]])[notNanIdx]
    else:
        # 'ST': only contour length and twist are required, no NaN filtering.
        array = np.array([clen[frames[0]:frames[1]], htwist[frames[0]:frames[1]]])
        time = time[frames[0]:frames[1]]

    return time, array |
<SYSTEM_TASK:>
r"""Calculate Bending-Stretching-Twisting matrix
<END_TASK>
<USER_TASK:>
Description:
def getStretchTwistBendModulus(self, bp, frames=None, paxis='Z', masked=True, matrix=False):
    r"""Calculate Bending-Stretching-Twisting matrix

    It calculate elastic matrix and modulus matrix.

    .. math::

        \text{modulus matrix} = 4.1419464 \times \begin{bmatrix}
        K_{Bx}    & K_{Bx,By} & K_{Bx,S} & K_{Bx,T} \\
        K_{Bx,By} & K_{By}    & K_{By,S} & K_{By,T} \\
        K_{Bx,S}  & K_{By,S}  & K_{S}    & K_{S,T}  \\
        K_{Bx,T}  & K_{By,T}  & K_{S,T}  & K_{T}
        \end{bmatrix} \times L_0

    .. currentmodule:: dnaMD

    Parameters
    ----------
    bp : list
        List of two base-steps forming the DNA segment.
        For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
    frames : list
        List of two trajectory frames between which parameters will be extracted. It can be used to select portions
        of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
        considered.
    paxis : str
        Axis parallel to global helical-axis(``'X'``, or ``'Y'`` or ``'Z'``). Only require when bending motions are
        included in the calculation.
    masked : bool
        ``Default=True``. To skip specific frames/snapshots.
        ``DNA.mask`` array should be set to use this functionality.
        This array contains boolean (either ``True`` or ``False``) value
        for each frame to mask the frames. Presently, mask array is
        automatically generated during :meth:`dnaMD.DNA.generate_smooth_axis` to
        skip those frames where 3D fitting curve was not successful within
        the given criteria.
    matrix : bool
        If it is ``True``, elastic constant matrix will be returned. Otherwise, by default modulus matrix will be
        returned.

    Return
    ------
    mean : numpy.ndarray
        Value of bending angles, contour length and twist angle (as 1D array) at which energy is zero. Minimum point
        on free energy landscape.

        .. math::
            \begin{bmatrix}
            \theta^{x}_0 & \theta^{y}_0 & L_0 & \phi_0
            \end{bmatrix}

    result : numpy.ndarray
        Either elastic matrix or modulus matrix depending on ``matrix`` value.
    """ |
    if self.esType == 'ST':
        raise KeyError(' Use dnaEY.getStretchTwistModulus for Stretching-Twisting modulus.')

    frames = self._validateFrames(frames)

    # Cache computed matrices per (segment, frame-range) to avoid re-extraction.
    name = '{0}-{1}-{2}-{3}'.format(bp[0], bp[1], frames[0], frames[1])

    if name not in self.esMatrix:
        time, array = self.extractGlobalParameters(self.dna, bp, frames=frames, paxis=paxis, masked=masked)
        mean = np.mean(array, axis=1)
        esMatrix = np.asarray(self.getElasticMatrix(array))
        self.esMatrix[name] = esMatrix
        self.minimumPoint[name] = mean
    else:
        esMatrix = self.esMatrix[name]
        mean = self.minimumPoint[name]

    if not matrix:
        # Modulus matrix = 4.1419464 * K * L0 (formula in docstring);
        # mean[2] is the average contour length L0.
        result = 4.1419464 * np.array(esMatrix) * mean[2]  # Calculate modulus
    else:
        result = esMatrix

    return mean, result |
<SYSTEM_TASK:>
r"""Calculate moduli as a function of time for convergence check
<END_TASK>
<USER_TASK:>
Description:
def getModulusByTime(self, bp, frameGap, masked=False, paxis='Z', outFile=None):
r"""Calculate moduli as a function of time for convergence check
It can be used to obtained elastic moduli as a function of time to check their convergence.
.. note:: Elastic properties cannot be calculated using a single frame because fluctuations are required.
Therefore, here time means trajectory between zero time to given time.
When ``esType='BST'``, following is obtained:
1) bend-1
2) bend-2
3) stretch
4) twist
5) bend-1-bend-2
6) bend-2-stretch
7) stretch-twist
8) bend-1-stretch
9) bend-2-twist
10) bend-1-twist
When ``esType='ST'``, following is obtained:
1) stretch
2) twist
3) stretch-twist
.. currentmodule:: dnaMD
Parameters
----------
bp : list
List of two base-steps forming the DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
frameGap : int
How many frames to skip for next calculation. this option will determine the
time-gap between each calculation. Lower the number, slower will be the calculation.
masked : bool
``Default=False``. To skip specific frames/snapshots.
``DNA.mask`` array should be set to use this functionality.
This array contains boolean (either ``True`` or ``False``) value
for each frame to mask the frames. Presently, mask array is
automatically generated during :meth:`dnaMD.DNA.generate_smooth_axis` to
skip those frames where 3D fitting curve was not successful within
the given criteria.
paxis : str
Axis parallel to global helical-axis(``'X'``, or ``'Y'`` or ``'Z'``). Only require when bending motions are
included in the calculation.
outFile : str
Output file in csv format.
Returns
-------
time : numpy.ndarray
1D array containing time values of shape (nframes).
Elasticities : OrderedDict
A ordered dictionary of 1D arrays of shape (nframes). The keys in dictionary are name of the elasticity in
the same order as listed above.
e.g. ``Elasticities['stretch']`` will give elasticity along stretching as a function of time.
""" |
if self.esType == 'BST':
props_name = [ 'bend-1', 'bend-2', 'stretch', 'twist', 'bend-1-bend-2',
'bend-2-stretch', 'stretch-twist', 'bend-1-stretch',
'bend-2-twist', 'bend-1-twist']
else:
props_name = ['stretch', 'twist', 'stretch-twist']
time, elasticity = [], OrderedDict()
for name in props_name:
elasticity[name] = []
length = len(self.dna.time[:])
for i in range(frameGap, length, frameGap):
props = None
if self.esType == 'BST':
mean, modulus_t = self.getStretchTwistBendModulus(bp, frames=[0, i], paxis=paxis, masked=True)
else:
mean, modulus_t = self.getStretchTwistModulus(bp, frames=[0, i], masked=masked)
modulus_t = matrixToVector(modulus_t)
for p in range(len(props_name)):
elasticity[props_name[p]].append(modulus_t[p])
time.append(self.dna.time[i])
# Write output file
if outFile is not None:
with open(outFile, 'w') as fout:
fout.write('#Time')
for name in props_name:
fout.write(', {0}'.format(name))
fout.write('\n')
for t in range(len(time)):
fout.write('{0:.3f}'.format(time[t]))
for name in props_name:
fout.write(', {0:.5f}'.format(elasticity[name][t]))
fout.write('\n')
return time, elasticity |
<SYSTEM_TASK:>
r"""Deformation energy of the input DNA using Global elastic properties
<END_TASK>
<USER_TASK:>
Description:
def getGlobalDeformationEnergy(self, bp, complexDna, freeDnaFrames=None, boundDnaFrames=None, paxis='Z',
which='all', masked=False, outFile=None):
r"""Deformation energy of the input DNA using Global elastic properties
It can be used to calculated deformation energy of a input DNA with reference to the DNA present in the current
object.
The deformation free energy is calculated using elastic matrix as follows
.. math::
G = \frac{1}{2L_0}\mathbf{xKx^T}
.. math::
\mathbf{x} = \begin{bmatrix}
(\theta^{x} - \theta^{x}_0) & (\theta^{y} - \theta^{y}_0) & (L - L_0) & (\phi - \phi_0)
\end{bmatrix}
.. currentmodule:: dnaMD
Parameters
----------
bp : list
List of two base-steps forming the DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
complexDna : :class:`dnaMD.DNA`
Input :class:`dnaMD.DNA` instance for which deformation energy will be calculated.
freeDnaFrames : list
To select a trajectory segment of current (free) DNA data.
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
boundDnaFrames : list
To select a trajectory segment of input (bound) DNA data.
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
paxis : str
Axis parallel to global helical-axis(``'X'``, or ``'Y'`` or ``'Z'``). Only require when bending motions are
included in the calculation.
which : str or list
For which motions, energy should be calculated. It should be either a list containing terms listed below or
"all" for all energy terms.
Following keywords are available:
* ``'full'`` : Use entire elastic matrix -- all motions with their coupling
* ``'diag'`` : Use diagonal of elastic matrix -- all motions but no coupling
* ``'b1'`` : Only bending-1 motion
* ``'b2'`` : Only bending-2 motion
* ``'stretch'`` : Only stretching motion
* ``'twist'`` : Only Twisting motions
* ``'st_coupling'`` : Only stretch-twist coupling motion
* ``'bs_coupling'`` : Only Bending and stretching coupling
* ``'bt_coupling'`` : Only Bending and Twisting coupling
* ``'bb_coupling'`` : Only bending-1 and bending-2 coupling
* ``'bend'`` : Both bending motions with their coupling
* ``'st'`` : Stretching and twisting motions with their coupling
* ``'bs'`` : Bending (b1, b2) and stretching motions with their coupling
* ``'bt'`` : Bending (b1, b2) and twisting motions with their coupling
masked : bool
``Default=False``. To skip specific frames/snapshots.
``DNA.mask`` array should be set to use this functionality.
This array contains boolean (either ``True`` or ``False``) value
for each frame to mask the frames. Presently, mask array is
automatically generated during :meth:`dnaMD.DNA.generate_smooth_axis` to
skip those frames where 3D fitting curve was not successful within
the given criteria.
outFile : str
Output file in csv format.
Returns
-------
time : numpy.ndarray
1D array containing time values.
energy : OrderedDict of numpy.ndarray
Dictionary of 1D array of shape (nframes) containing energy terms requested for DNA.
""" |
if self.esType == 'BST':
energyTerms = self.enGlobalTypes
else:
energyTerms = self.enGlobalTypes[:5]
if isinstance(which, str):
if which != 'all':
raise ValueError('Either use "all" or use list of terms from this {0} list \n.'.format(energyTerms))
else:
which = energyTerms
elif isinstance(which, list):
for key in which:
if key not in energyTerms:
raise ValueError('{0} is not a supported keyword.\n Use from the following list: \n{1}'.format(
which, energyTerms))
else:
raise ValueError('Either use "all" or use list of terms from this {0} list \n.'.format(
energyTerms))
if self.esType == 'BST':
means, esMatrix = self.getStretchTwistBendModulus(bp, frames=freeDnaFrames, masked=masked,
matrix=True, paxis=paxis)
else:
means, esMatrix = self.getStretchTwistModulus(bp, frames=freeDnaFrames, masked=masked, matrix=True)
esMatrix = 2.5 * esMatrix # Convert kT to kJ/mol
time, array = self.extractGlobalParameters(complexDna, bp, frames=boundDnaFrames, paxis=paxis, masked=masked)
# Initialize energy dictionary
energyOut = OrderedDict()
for key in which:
energyOut[key] = []
for i in range(array[0].shape[0]):
vec = array[:, i]
diff = vec - means
for key in which:
if self.esType == 'BST':
t_energy = self._calcEnergyBendStretchTwist(diff, esMatrix, key)
else:
t_energy = self._calcEnergyStretchTwist(diff, esMatrix, key)
energyOut[key].append(t_energy)
for key in which:
energyOut[key] = np.asarray(energyOut[key])
# Write output file
if outFile is not None:
with open(outFile, 'w') as fout:
fout.write('#Time')
for name in which:
fout.write(', {0}'.format(name))
fout.write('\n')
for t in range(len(time)):
fout.write('{0:.3f}'.format(time[t]))
for name in which:
fout.write(', {0:.5f}'.format(energyOut[name][t]))
fout.write('\n')
return time, energyOut |
<SYSTEM_TASK:>
r"""Calculate energy for ``estype='ST'`` using a difference vector.
<END_TASK>
<USER_TASK:>
Description:
def _calcEnergyStretchTwist(self, diff, es, which):
r"""Calculate energy for ``estype='ST'`` using a difference vector.
It is called in :meth:`dnaEY.getGlobalDeformationEnergy` for energy calculation of each frame.
Parameters
----------
diff : numpy.ndarray
Array of difference between minimum and current parameter values.
.. math::
\mathbf{x} = \begin{bmatrix}
(L_i - L_0) & (\phi_i - \phi_0)
\end{bmatrix}
es : numpy.ndarray
Elastic matrix. See in :meth:`dnaEY.getStretchTwistModulus` about elastic matrix.
which : str
For which type of motions, energy will be calculated.
See ``which`` parameter in :meth:`dnaEY.getGlobalDeformationEnergy` for keywords.
Return
------
energy : float
Deformation free energy value
""" |
if which not in self.enGlobalTypes[:5]:
raise ValueError('{0} is not a supported energy keywords.\n Use any of the following: \n {1}'.format(
which, self.enGlobalTypes[:5]))
energy = None
if which == 'full':
temp = np.matrix(diff)
energy = 0.5 * ((temp * es) * temp.T)
energy = energy[0,0]
if which == 'diag':
energy = 0.5 * ((diff[0] ** 2 * es[0][0])
+ (diff[1] ** 2 * es[1][1]))
if which == 'stretch':
energy = 0.5 * (diff[0] ** 2 * es[0][0])
if which == 'twist':
energy = 0.5 * (diff[1] ** 2 * es[1][1])
if which == 'st_coupling':
energy = 0.5 * (diff[0] * diff[1] * es[0][1])
return energy |
<SYSTEM_TASK:>
r"""Calculate local elastic matrix or stiffness matrix for local DNA segment
<END_TASK>
<USER_TASK:>
Description:
def calculateLocalElasticity(self, bp, frames=None, helical=False, unit='kT'):
    r"""Calculate local elastic matrix or stiffness matrix for local DNA segment

    .. note:: Here local DNA segment referred to less than 5 base-pair long.

    In case of :ref:`base-step-image`: Shift (:math:`Dx`), Slide (:math:`Dy`), Rise (:math:`Dz`),
    Tilt (:math:`\tau`), Roll (:math:`\rho`) and Twist (:math:`\omega`), following elastic matrix is calculated.

    .. math::

        \mathbf{K}_{base-step} = \begin{bmatrix}
        K_{Dx} & K_{Dx,Dy} & K_{Dx,Dz} & K_{Dx,\tau} & K_{Dx,\rho} & K_{Dx,\omega} \\
        K_{Dx,Dy} & K_{Dy} & K_{Dy,Dz} & K_{Dy,\tau} & K_{Dy,\rho} & K_{Dy,\omega} \\
        K_{Dx,Dz} & K_{Dy,Dz} & K_{Dz} & K_{Dz,\tau} & K_{Dz,\rho} & K_{Dz,\omega} \\
        K_{Dx,\tau} & K_{Dy,\tau} & K_{Dz,\tau} & K_{\tau} & K_{\tau, \rho} & K_{\tau,\omega} \\
        K_{Dx,\rho} & K_{Dy,\rho} & K_{Dz,\rho} & K_{\tau, \rho} & K_{\rho} & K_{\rho,\omega} \\
        K_{Dx,\omega} & K_{Dy,\omega} & K_{Dz,\omega} & K_{\tau, \omega} & K_{\rho, \omega} & K_{\omega} \\
        \end{bmatrix}

    In case of :ref:`helical-base-step-image`: x-displacement (:math:`dx`), y-displacement (:math:`dy`), h-rise (:math:`h`),
    inclination (:math:`\eta`), tip (:math:`\theta`) and twist (:math:`\Omega`), following elastic matrix is calculated.

    .. math::

        \mathbf{K}_{helical-base-step} = \begin{bmatrix}
        K_{dx} & K_{dx,dy} & K_{dx,h} & K_{dx,\eta} & K_{dx,\theta} & K_{dx,\Omega} \\
        K_{dx,dy} & K_{dy} & K_{dy,h} & K_{dy,\eta} & K_{dy,\theta} & K_{dy,\Omega} \\
        K_{dx,h} & K_{dy,h} & K_{h} & K_{h,\eta} & K_{h,\theta} & K_{h,\Omega} \\
        K_{dx,\eta} & K_{dy,\eta} & K_{h,\eta} & K_{\eta} & K_{\eta, \theta} & K_{\eta,\Omega} \\
        K_{dx,\theta} & K_{dy,\theta} & K_{h,\theta} & K_{\eta, \theta} & K_{\theta} & K_{\theta,\Omega} \\
        K_{dx,\Omega} & K_{dy,\Omega} & K_{h,\Omega} & K_{\eta, \Omega} & K_{\theta, \Omega} & K_{\Omega} \\
        \end{bmatrix}

    Parameters
    ----------
    bp : list
        List of two base-steps forming the DNA segment.
        For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
    frames : list
        List of two trajectory frames between which parameters will be extracted. It can be used to select portions
        of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
        considered.
    helical : bool
        If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise,
        by default, elastic matrix for **base-step** parameters are calculated.
    unit : str
        Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.

    Return
    ------
    mean : numpy.ndarray
        Value of parameters at which energy is zero. Minimum point on energy landscape.

        if ``helical=False``

        .. math::
            \begin{bmatrix}
            Dx_0 & Dy_0 & Dz_0 & \tau_0 & \rho_0 & \omega_0
            \end{bmatrix}

        if ``helical=True``

        .. math::
            \begin{bmatrix}
            dx_0 & dy_0 & h_0 & \eta_0 & \theta_0 & \Omega_0
            \end{bmatrix}

    result : numpy.ndarray
        Elastic matrix.
    """ |
    acceptedUnit = ['kT', 'kJ/mol', 'kcal/mol']
    if unit not in acceptedUnit:
        raise ValueError(" {0} not accepted. Use any of the following: {1} ".format(unit, acceptedUnit))

    frames = self._validateFrames(frames)

    # Cache key encodes segment, frame range and parameter set (helical or not).
    name = '{0}-{1}-{2}-{3}-local-{4}'.format(bp[0], bp[1], frames[0], frames[1], int(helical))

    # Local elasticity is only meaningful for short (<= 4 base-step) segments.
    if bp[1]-bp[0]+1 > 4:
        raise ValueError("Selected span {0} is larger than 4, and therefore, not recommended for local elasticity".format(bp[1]-bp[0]+1))

    if name not in self.esMatrix:
        time, array = self.extractLocalParameters(self.dna, bp, helical=helical, frames=frames)
        mean = np.mean(array, axis=1)
        esMatrix = self.getElasticMatrix(array)
        self.esMatrix[name] = esMatrix
        self.minimumPoint[name] = mean
    else:
        esMatrix = self.esMatrix[name]
        mean = self.minimumPoint[name]

    # Convert the matrix from kT to the requested energy unit.
    if unit == 'kJ/mol':
        result = 2.4946938107879997 * esMatrix  # (1.38064852e-23 * 300 * 6.023e23 / 1000 ) kT.NA/1000
    elif unit == 'kcal/mol':
        result = 0.5962461306854684 * esMatrix  # (1.38064852e-23 * 300 * 6.023e23 / 1000 / 4.184) kT.NA/1000
    else:
        result = esMatrix

    return mean, result |
<SYSTEM_TASK:>
r"""Calculate local elastic properties as a function of time for convergence check
<END_TASK>
<USER_TASK:>
Description:
def getLocalElasticityByTime(self, bp, frameGap, helical=False, unit='kT', outFile=None):
    r"""Calculate local elastic properties as a function of time for convergence check

    It can be used to obtained elastic properties as a function of time.

    .. note:: Elastic properties cannot be calculated using a single frame because fluctuations are required.
              Therefore, here time means trajectory between zero time to given time.

    When ``helical='False'``, following is obtained:

        1) Shift (:math:`K_{Dx}`)
        2) Slide (:math:`K_{Dy}`)
        3) Rise (:math:`K_{Dz}`)
        4) Tilt (:math:`K_{\tau}`)
        5) Roll (:math:`K_{\rho}`)
        6) Twist (:math:`K_{\omega}`)
        7) :math:`K_{Dx,Dy}`
        8) :math:`K_{Dy,Dz}`
        9) :math:`K_{Dz,\tau}`
        10) :math:`K_{\tau, \rho}`
        11) :math:`K_{\rho,\omega}`
        12) :math:`K_{Dx,Dz}`
        13) :math:`K_{Dy,\tau}`
        14) :math:`K_{Dz,\rho}`
        15) :math:`K_{\tau,\omega}`
        16) :math:`K_{Dx,\tau}`
        17) :math:`K_{Dy,\rho}`
        18) :math:`K_{Dz,\omega}`
        19) :math:`K_{Dx,\rho}`
        20) :math:`K_{Dy,\omega}`
        21) :math:`K_{Dx,\omega}`

    When ``helical='True'``, following is obtained:

        1) Shift (:math:`K_{Dx}`)
        2) Slide (:math:`K_{Dy}`)
        3) Rise (:math:`K_{h}`)
        4) Tilt (:math:`K_{\eta}`)
        5) Roll (:math:`K_{\theta}`)
        6) Twist (:math:`K_{\Omega}`)
        7) :math:`K_{dx,dy}`
        8) :math:`K_{dy,h}`
        9) :math:`K_{h,\eta}`
        10) :math:`K_{\eta, \theta}`
        11) :math:`K_{\theta,\Omega}`
        12) :math:`K_{dx,h}`
        13) :math:`K_{dy,\eta}`
        14) :math:`K_{h,\theta}`
        15) :math:`K_{\eta,\Omega}`
        16) :math:`K_{dx,\eta}`
        17) :math:`K_{dy,\theta}`
        18) :math:`K_{h,\Omega}`
        19) :math:`K_{dx,\theta}`
        20) :math:`K_{dy,\Omega}`
        21) :math:`K_{dx,\Omega}`

    .. currentmodule:: dnaMD

    Parameters
    ----------
    bp : list
        List of two base-steps forming the DNA segment.
        For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
    frameGap : int
        How many frames to skip for next time-frame. Lower the number, slower will be the calculation.
    helical : bool
        If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise,
        by default, elastic matrix for **base-step** parameters are calculated.
    unit : str
        Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.
    outFile : str
        Output file in csv format.

    Returns
    -------
    time : numpy.ndarray
        1D array containing time values of shape (nframes).
    Elasticities : OrderedDict
        A ordered dictionary of 1D arrays of shape (nframes). The keys in dictionary are name of the elasticity in
        the same order as listed above.
        e.g. ``Elasticities['shift']`` will give elasticity along shift parameters as a function of time.
    """ |
    # Property names follow the ordering documented above.
    if helical:
        props_name = helical_local_props_vector
    else:
        props_name = local_props_vector

    time, elasticity = [], OrderedDict()
    for name in props_name:
        elasticity[name] = []

    # Grow the analysed window [0, i] in steps of frameGap.
    length = len(self.dna.time[:])
    for i in range(frameGap, length, frameGap):
        mean, esy_t = self.calculateLocalElasticity(bp, frames=[0, i], helical=helical, unit=unit)
        # Flatten the (symmetric) elastic matrix into the property vector.
        esy_t = matrixToVector(esy_t)
        for p in range(len(props_name)):
            elasticity[props_name[p]].append(esy_t[p])
        time.append(self.dna.time[i])

    # Write output file
    if outFile is not None:
        with open(outFile, 'w') as fout:
            fout.write('#Time')
            for name in props_name:
                fout.write(', {0}'.format(name))
            fout.write('\n')
            for t in range(len(time)):
                fout.write('{0:.3f}'.format(time[t]))
                for name in props_name:
                    fout.write(', {0:.5f}'.format(elasticity[name][t]))
                fout.write('\n')

    return time, elasticity |
<SYSTEM_TASK:>
Calculate local elastic properties of consecutive overlapped DNA segments
<END_TASK>
<USER_TASK:>
Description:
def calculateLocalElasticitySegments(self, bp, span=2, frameGap=None, helical=False, unit='kT',
                                     err_type='block', tool='gmx analyze', outFile=None):
    """Calculate local elastic properties of consecutive overlapped DNA segments

    Calculate local elastic properties of consecutive overlapped DNA segments of length given by `span`.

    Parameters
    ----------
    bp : list
        List of two base-steps forming the global DNA segment.
        For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
    span : int
        Length of overlapping (local) DNA segments. It should be less than four.
    frameGap : int
        How many frames to skip for next time-frame. Lower the number, slower will be the calculation.
    helical : bool
        If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise,
        by default, elastic matrix for **base-step** parameters are calculated.
    unit : str
        Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.
    err_type : str
        Error estimation by autocorrelation method ``err_type='acf'`` or
        block averaging method ``err_type='block'``
    tool : str
        GROMACS tool to calculate error. In older versions it is `g_analyze` while in
        newer versions (above 2016) it is `gmx analyze`.
    outFile : str
        Output file in csv format.

    Returns
    -------
    segments : list
        list of DNA segments for which local elastic properties was calculated.
    elasticities : OrderedDict
        A ordered dictionary of 1D arrays of shape (segments). The keys in dictionary are name of the elasticity in
        the same order as listed above.
    error : OrderedDict
        A ordered dictionary of 1D arrays of shape (segments). The keys in dictionary are name of the elasticity in
        the same order as listed above..
    """ |
    if helical:
        props_name = helical_local_props_vector
    else:
        props_name = local_props_vector

    segments, errors, elasticities = [], OrderedDict(), OrderedDict()
    for name in props_name:
        elasticities[name] = []
        errors[name] = []

    # Slide a window of `span` base-steps along [bp[0], bp[1]]; for each window
    # the last value of the time series (full-trajectory estimate) is kept.
    # NOTE(review): frameGap must be an int here; the default None would fail
    # inside getLocalElasticityByTime's range() -- confirm callers always pass it.
    for s in range(bp[0], bp[1]):
        if s+span-1 > bp[1]:
            break

        time, elasticity_t = self.getLocalElasticityByTime([s, s+span-1], frameGap=frameGap, helical=helical, unit=unit)
        error_t = dnaMD.get_error(time, list(elasticity_t.values()), len(props_name), err_type=err_type, tool=tool)
        for i in range(len(props_name)):
            esy_t = elasticity_t[props_name[i]][-1]  # only take last entry
            elasticities[props_name[i]].append(esy_t)
            errors[props_name[i]].append(error_t[i])

        segments.append('{0}-{1}'.format(s, s+span-1))

    # Write output file
    if outFile is not None:
        with open(outFile, 'w') as fout:
            fout.write('#bps')
            for name in props_name:
                fout.write(', {0}, {0}-error'.format(name))
            fout.write('\n')
            for s in range(len(segments)):
                fout.write('{0}'.format(segments[s]))
                for name in props_name:
                    fout.write(', {0:.5f}, {1:.5f}'.format(elasticities[name][s], errors[name][s]))
                fout.write('\n')

    return segments, elasticities, errors |
<SYSTEM_TASK:>
r"""Deformation energy of the input DNA using local elastic properties
<END_TASK>
<USER_TASK:>
Description:
def getLocalDeformationEnergy(self, bp, complexDna, freeDnaFrames=None, boundDnaFrames=None, helical=False,
                              unit='kT', which='all', outFile=None):
    r"""Deformation energy of the input DNA using local elastic properties.

    The deformation energy of a base-step segment of the probe DNA is
    computed with reference to the same segment in the current (free) DNA
    object as :math:`G = \frac{1}{2}\mathbf{xKx^T}`, where ``K`` is the
    local elastic matrix (base-step or helical base-step parameters,
    depending on ``helical``) and ``x`` the parameter difference vector.

    Parameters
    ----------
    bp : list
        List of two base-steps forming the DNA segment.
    complexDna : :class:`dnaMD.DNA`
        Input DNA instance for which deformation energy will be calculated.
    freeDnaFrames : list
        Trajectory frame range of the current (free) DNA data.
    boundDnaFrames : list
        Trajectory frame range of the input (bound) DNA data.
    helical : bool
        If True, use the elastic matrix of helical base-step parameters.
    unit : str
        Unit of energy. Allowed units: ``'kT', 'kJ/mol' and 'kcal/mol'``.
    which : str or list
        Energy terms to calculate: ``'all'`` or a list drawn from
        ``'full'``, ``'diag'`` and the six individual motion keywords.
    outFile : str
        Output file in csv format.

    Returns
    -------
    time : numpy.ndarray
        1D array containing time values.
    energy : OrderedDict of numpy.ndarray
        1D arrays of shape (nframes) containing the requested energy terms.

    Raises
    ------
    ValueError
        If ``which`` is neither ``'all'`` nor a list of supported keywords.
    """
    if helical:
        energyTerms = ['full', 'diag', 'x-disp', 'y-disp', 'h-rise', 'inclination', 'tip', 'h-twist']
    else:
        energyTerms = ['full', 'diag', 'shift', 'slide', 'rise', 'tilt', 'roll', 'twist']

    # Normalize and validate the requested energy terms
    if isinstance(which, str):
        if which != 'all':
            raise ValueError('Either use "all" or use list of terms from this {0} list \n.'.format(energyTerms))
        which = energyTerms
    elif isinstance(which, list):
        for key in which:
            if key not in energyTerms:
                # BUGFIX: report the offending keyword instead of the whole
                # requested list, which made the message useless.
                raise ValueError('{0} is not a supported keyword.\n Use from the following list: \n{1}'.format(
                    key, energyTerms))
    else:
        raise ValueError('Either use "all" or use list of terms from this {0} list \n.'.format(energyTerms))

    # Elastic properties of the free DNA and parameters of the bound DNA
    means, esMatrix = self.calculateLocalElasticity(bp, frames=freeDnaFrames, helical=helical, unit=unit)
    time, array = self.extractLocalParameters(complexDna, bp, frames=boundDnaFrames, helical=helical)

    # One energy series per requested term, preserving the order of `which`
    energyOut = OrderedDict((key, []) for key in which)
    for i in range(array[0].shape[0]):
        diff = array[:, i] - means
        for key in which:
            energyOut[key].append(self._calcLocalEnergy(diff, esMatrix, key))
    for key in which:
        energyOut[key] = np.asarray(energyOut[key])

    # Write output file
    if outFile is not None:
        with open(outFile, 'w') as fout:
            fout.write('#Time')
            for name in which:
                fout.write(', {0}'.format(name))
            fout.write('\n')
            for t in range(len(time)):
                fout.write('{0:.3f}'.format(time[t]))
                for name in which:
                    fout.write(', {0:.5f}'.format(energyOut[name][t]))
                fout.write('\n')

    return time, energyOut
<SYSTEM_TASK:>
r"""Calculate local deformation energy using a difference vector.
<END_TASK>
<USER_TASK:>
Description:
def _calcLocalEnergy(self, diff, es, which):
    r"""Calculate local deformation energy from a difference vector.

    Called from :meth:`dnaEY.getLocalDeformationEnergy` for each frame.

    Parameters
    ----------
    diff : numpy.ndarray
        Difference between minimum (mean) and current parameter values.
    es : numpy.ndarray
        Elastic matrix; see :meth:`dnaEY.calculateLocalElasticity`.
    which : str
        Energy term; see ``which`` in
        :meth:`dnaEY.getLocalDeformationEnergy` for keywords.

    Returns
    -------
    energy : float or None
        Deformation free energy value. None is returned for keywords that
        are accepted by ``self.enLocalTypes`` but have no formula here,
        preserving the original fall-through behavior.
    """
    if which not in self.enLocalTypes:
        raise ValueError('{0} is not a supported energy keywords.\n Use any of the following: \n {1}'.format(
            which, self.enLocalTypes))

    if which == 'full':
        # Full quadratic form 0.5 * x K x^T. np.matrix is kept for
        # compatibility with matrix-typed `es`, although it is deprecated
        # in recent numpy releases.
        temp = np.matrix(diff)
        return (0.5 * ((temp * es) * temp.T))[0, 0]

    if which == 'diag':
        # Diagonal-only energy: all motions, no coupling terms
        return 0.5 * sum(diff[i] ** 2 * es[i][i] for i in range(6))

    # Single-motion terms: each keyword maps onto one diagonal index
    index = {'shift': 0, 'x-disp': 0,
             'slide': 1, 'y-disp': 1,
             'rise': 2, 'h-rise': 2,
             'tilt': 3, 'inclination': 3,
             'roll': 4, 'tip': 4,
             'twist': 5, 'h-twist': 5}.get(which)
    if index is None:
        return None
    return 0.5 * (diff[index] ** 2 * es[index][index])
<SYSTEM_TASK:>
Return a list of variable names used in a URI template.
<END_TASK>
<USER_TASK:>
Description:
def extract_variables(href):
    """Return a list of variable names used in a URI template.

    Operator prefixes (+ # . / ; ? &) and variable modifiers (``*`` explode,
    ``:N`` prefix) are stripped. Order of first appearance is preserved and
    duplicates are removed.
    """
    patterns = [re.sub(r'\*|:\d+', '', pattern)
                for pattern in re.findall(r'{[\+#\./;\?&]?([^}]+)*}', href)]
    variables = []
    for pattern in patterns:
        for part in pattern.split(","):
            if part not in variables:  # idiomatic membership test; keep first-seen order
                variables.append(part)
    return variables
<SYSTEM_TASK:>
Returns a URL for the link with optional template expansion.
<END_TASK>
<USER_TASK:>
Description:
def url(self, **kwargs):
    """Return a URL for the link, with optional template expansion.

    When the link is marked as templated, the href is expanded according
    to RFC6570 using the keyword arguments as template variables. A href
    that merely looks like a URI Template is returned verbatim unless the
    link is explicitly templated.
    """
    if not self.is_templated:
        return self.template
    return uritemplate.expand(self.template, kwargs)
<SYSTEM_TASK:>
Returns a new ``Link`` based on a JSON object or array.
<END_TASK>
<USER_TASK:>
Description:
def from_object(cls, o, base_uri):
    """Build a new ``Link`` from a deserialized JSON object or array.

    Arguments:

    - ``o``: a dictionary holding the deserialized JSON for the new
      ``Link``, or a ``list`` of such documents.
    - ``base_uri``: optional URL used as the basis when expanding
      relative URLs in the link.

    A single-element list collapses to one ``Link``; a longer list yields
    a list of ``Link`` objects.
    """
    if not isinstance(o, list):
        return cls(o, base_uri)
    if len(o) == 1:
        return cls.from_object(o[0], base_uri)
    return [cls.from_object(item, base_uri) for item in o]
<SYSTEM_TASK:>
Install an asyncio protocol to process NOTIFY messages.
<END_TASK>
<USER_TASK:>
Description:
async def install_mediaroom_protocol(responses_callback, box_ip=None):
    """Install an asyncio protocol to process NOTIFY messages.

    Creates a datagram endpoint bound to the multicast socket and returns
    the protocol instance so the caller can close it later.
    """
    from . import version
    _LOGGER.debug(version)

    protocol = MediaroomProtocol(responses_callback, box_ip)
    await asyncio.get_event_loop().create_datagram_endpoint(
        lambda: protocol, sock=create_socket())
    return protocol
<SYSTEM_TASK:>
XML node representing tune.
<END_TASK>
<USER_TASK:>
Description:
def tune(self):
    """XML node representing tune, or None when no activity is present."""
    activities = self._node.get('activities')
    if not activities:
        return None
    tune = activities.get('tune')
    # The notification may carry a single node (OrderedDict) or a list of
    # nodes; in the latter case only the first entry is relevant.
    if type(tune) is list:
        return tune[0]
    return tune
<SYSTEM_TASK:>
Return if the stream is stopped.
<END_TASK>
<USER_TASK:>
Description:
def stopped(self):
    """Return True if the stream is stopped, False otherwise.

    Raises
    ------
    PyMediaroomError
        If the tune node carries no '@stopped' attribute.
    """
    if self.tune and self.tune.get('@stopped'):
        # The attribute is the string 'true'/'false'; compare directly
        # instead of the redundant `True if ... else False` ternary.
        return self.tune.get('@stopped') == 'true'
    else:
        raise PyMediaroomError("No information in <node> about @stopped")
<SYSTEM_TASK:>
Return if the stream is a timeshift.
<END_TASK>
<USER_TASK:>
Description:
def timeshift(self):
    """Return True if the stream is a timeshift, False otherwise.

    Raises
    ------
    PyMediaroomError
        If the tune node carries no '@src' attribute.
    """
    if self.tune and self.tune.get('@src'):
        # startswith() already yields a bool; no ternary needed
        return self.tune.get('@src').startswith('timeshift')
    else:
        raise PyMediaroomError("No information in <node> about @src")
<SYSTEM_TASK:>
Return if the stream is a recording.
<END_TASK>
<USER_TASK:>
Description:
def recorded(self):
    """Return True if the stream is a recording ('mbr' source), else False.

    Raises
    ------
    PyMediaroomError
        If the tune node carries no '@src' attribute.
    """
    if self.tune and self.tune.get('@src'):
        # startswith() already yields a bool; no ternary needed
        return self.tune.get('@src').startswith('mbr')
    else:
        raise PyMediaroomError("No information in <node> about @src")
<SYSTEM_TASK:>
Datagram received callback.
<END_TASK>
<USER_TASK:>
Description:
def datagram_received(self, data, addr):
    """Datagram received callback: forward NOTIFY data to the consumer.

    When a specific box IP was configured, datagrams from other hosts are
    silently ignored.
    """
    if self.box_ip and self.box_ip != addr[0]:
        return
    self.responses(MediaroomNotify(addr, data))
<SYSTEM_TASK:>
Pauses the clock to continue running later.
<END_TASK>
<USER_TASK:>
Description:
def pause(self):
    """Toggle the clock between paused and running.

    When pausing, the duration of the interval just ended is appended to
    ``previous_intervals``; when resuming, a fresh interval is started.
    """
    now = time.time()
    if self.status == RUNNING:
        self.status = PAUSED
        self.previous_intervals.append(now - self.interval_start)
        self.current_interval_duration = 0.0
    elif self.status == PAUSED:
        self.interval_start = now
        self.status = RUNNING
<SYSTEM_TASK:>
Starts the clock from 0.
<END_TASK>
<USER_TASK:>
Description:
def start(self):
    """Start the clock from 0.

    Uses a separate thread to handle the timing functionalities. If the
    clock thread is already running, a message is printed instead.
    """
    # BUGFIX: Thread.isAlive() was removed in Python 3.9; is_alive() is
    # the supported spelling on both Python 2.6+ and 3.x.
    if not hasattr(self, "thread") or not self.thread.is_alive():
        self.thread = threading.Thread(target=self.__run)
        self.status = RUNNING
        self.reset()
        self.thread.start()
    else:
        print("Clock already running!")
<SYSTEM_TASK:>
Internal function that is run in a separate thread. Do not call
<END_TASK>
<USER_TASK:>
Description:
def __run(self):
    """Internal timing loop executed in a separate thread. Do not call
    directly."""
    self.interval_start = time.time()
    while self.status != STOPPED:
        if self.status == RUNNING:
            self.current_interval_duration = time.time() - self.interval_start
            # If max_duration is set, stop the clock once it is reached.
            # BUGFIX: this used '==' (a no-op comparison) instead of '=',
            # so reaching max_duration never actually stopped the clock.
            if self.max_duration and self.time > self.max_duration:
                self.status = STOPPED
        # One refresh per millisecond seems enough
        time.sleep(0.001)
<SYSTEM_TASK:>
The current frame number that should be displayed.
<END_TASK>
<USER_TASK:>
Description:
def current_frame(self):
    """The frame number that should currently be displayed, derived from
    the elapsed time and the configured fps."""
    if not self.__fps:
        raise RuntimeError("fps not set so current frame number cannot be"
            " calculated")
    return int(self.__fps * self.time)
<SYSTEM_TASK:>
Sets the frames per second of the current movie the clock is used for.
<END_TASK>
<USER_TASK:>
Description:
def fps(self, value):
    """Set the frames per second of the movie this clock is used for.

    Parameters
    ----------
    value : float or None
        The fps value; must be a float >= 1.0, or None to unset.

    Raises
    ------
    ValueError
        If value is not a float or is smaller than 1.0.
    """
    if value is not None:
        # NOTE: exact floats are required; ints are deliberately rejected
        # (preserves the original `type(value) == float` check).
        if type(value) is not float:
            raise ValueError("fps needs to be specified as a float")
        if value < 1.0:
            raise ValueError("fps needs to be greater than 1.0")
    self.__fps = value
<SYSTEM_TASK:>
Sets the value of max duration
<END_TASK>
<USER_TASK:>
Description:
def max_duration(self, value):
    """Set the maximum duration after which the clock stops itself.

    Parameters
    ----------
    value : float, int or None
        The value for max_duration in seconds; None disables the limit.

    Raises
    ------
    TypeError
        If max_duration is not a number.
    ValueError
        If max_duration is smaller than 1.0 (note: the original docstring
        said "smaller than 0", but the code checks against 1.0).
    """
    if value is not None:
        # Exact float/int required; bool is rejected (preserves the
        # original `type(value) in [...]` check).
        if type(value) not in (float, int):
            raise TypeError("max_duration needs to be specified as a number")
        if value < 1.0:
            raise ValueError("max_duration needs to be greater than 1.0")
        value = float(value)
    self.__max_duration = value
<SYSTEM_TASK:>
Loads the string data from a text file that was packaged as a
<END_TASK>
<USER_TASK:>
Description:
def get_shared(fname, encoding="utf-8"):
    """
    Load the string data from a text file that was packaged as a data
    file in the distribution.

    Tries, in order: the file below ``sys.prefix``, the same path outside
    a Homebrew Cellar prefix (Mac OS X), and finally the setuptools
    ``pkg_resources.resource_string`` fallback, since installing Dose
    with setup.py directly instead of using wheel/pip would store the
    ``data_files`` otherwhere. For more information, see:

    https://github.com/pypa/setuptools/issues/130
    """
    relative_path = "share/dose/v{0}/{1}".format(__version__, fname)
    path_parts = relative_path.split("/")

    # 1) Look for the file directly on sys.prefix
    try:
        return "\n".join(read_plain_text(os.path.join(sys.prefix, *path_parts),
                                         encoding=encoding))
    except IOError:
        pass

    # 2) Homebrew (Mac OS X) stores the data in Cellar, a directory in the
    # system prefix; pip installs the shared resources outside the Cellar
    cellar_index = sys.prefix.find("/Cellar/")
    if cellar_index != -1:  # Found!
        try:
            return "\n".join(read_plain_text(
                os.path.join(sys.prefix[:cellar_index], *path_parts),
                encoding=encoding))
        except IOError:
            pass

    # 3) Fallback: look for the file using setuptools (perhaps it's still
    # compressed inside an egg file or stored otherwhere)
    from pkg_resources import Requirement, resource_string
    return resource_string(Requirement.parse("dose"), relative_path)
<SYSTEM_TASK:>
Returns list of agents based on their state and connectedness
<END_TASK>
<USER_TASK:>
Description:
def get_agents(self, state_id=None, limit_neighbors=False):
    """Return agents filtered by state and/or connectedness.

    Parameters
    ----------
    state_id : int, str, or array-like, optional
        When given, only agents whose state 'id' equals this value are
        returned; with None, all agents are returned regardless of state.
    limit_neighbors : bool, optional
        When True, only agents directly connected to this agent are
        considered; otherwise every agent in the topology is considered.
    """
    if limit_neighbors:
        candidates = self.global_topology.neighbors(self.id)
    else:
        candidates = self.get_all_nodes()

    agents = (self.global_topology.node[node_id]['agent'] for node_id in candidates)
    if state_id is None:
        return list(agents)  # return all, regardless of state
    return [agent for agent in agents if agent.state['id'] == state_id]
<SYSTEM_TASK:>
Add a new node to the current network
<END_TASK>
<USER_TASK:>
Description:
def add_node(self, agent_type=None, state=None, name='network_process', **state_params):
    """Add a new node to the current network.

    Parameters
    ----------
    agent_type : NetworkAgent subclass
        Class used to instantiate the agent placed on the new node.
    state : object
        Initial state of the agent; may be an integer, string or any other
        object.
    name : str, optional
        Descriptive name of the agent.
    state_params : keyword arguments, optional
        Key-value pairs of other state parameters for the agent.

    Returns
    -------
    int
        Agent ID of the new node.
    """
    # IDs are assigned sequentially: the next ID equals the node count
    new_id = int(len(self.global_topology.nodes()))
    new_agent = agent_type(self.env, agent_id=new_id, state=state, name=name, **state_params)
    self.global_topology.add_node(new_id, {'agent': new_agent})
    return new_id
<SYSTEM_TASK:>
Add an edge between agent_id1 and agent_id2. agent_id1 and agent_id2 correspond to Networkx node IDs.
<END_TASK>
<USER_TASK:>
Description:
def add_edge(self, agent_id1, agent_id2, edge_attr_dict=None, *edge_attrs):
    """
    Add an edge between agent_id1 and agent_id2 (Networkx node IDs).

    This is a wrapper for the Networkx.Graph method `.add_edge`.
    Edge attributes can be specified passing a dictionary with key-value
    pairs.

    Parameters
    ----------
    agent_id1, agent_id2 : nodes
        Nodes (as defined by Networkx); must already exist in the network.
    edge_attr_dict : dictionary, optional (default = no attributes)
        Dictionary of edge attributes. Assigns values to specified keyword
        attributes and overwrites them if already present.
    edge_attrs : positional arguments, optional
        Additional edge attribute arguments forwarded to Networkx.

    Raises
    ------
    ValueError
        If either agent ID is not already present in the network.
    """
    # Validate both endpoints before touching the graph, so no edge is
    # created with a dangling agent ID (plain add_edge would auto-create
    # missing nodes).
    if agent_id1 in self.global_topology.nodes(data=False):
        if agent_id2 in self.global_topology.nodes(data=False):
            # NOTE(review): 'edge_attr_dict' is passed as a keyword to
            # networkx Graph.add_edge -- this matches the networkx 1.x
            # 'attr_dict'-style API; confirm against the pinned networkx
            # version.
            self.global_topology.add_edge(agent_id1, agent_id2, edge_attr_dict=edge_attr_dict, *edge_attrs)
        else:
            raise ValueError('\'agent_id2\'[{}] not in list of existing agents in the network'.format(agent_id2))
    else:
        raise ValueError('\'agent_id1\'[{}] not in list of existing agents in the network'.format(agent_id1))
<SYSTEM_TASK:>
Perform a request on a given httplib connection object taken from our
<END_TASK>
<USER_TASK:>
Description:
def _make_request(self, conn, method, url, timeout=_Default,
                  **httplib_request_kw):
    """
    Perform a request on a given httplib connection object taken from our
    pool.

    ``_Default`` as timeout means "use the pool's configured timeout".
    Returns the (not yet read) httplib response object.
    """
    self.num_requests += 1

    if timeout is _Default:
        timeout = self.timeout

    conn.request(method, url, **httplib_request_kw)
    # NOTE(review): the timeout is applied only after the request has been
    # sent, so connecting/sending still uses the socket's prior timeout --
    # confirm whether this matches the intended semantics.
    conn.sock.settimeout(timeout)
    httplib_response = conn.getresponse()

    # Lazy %-style logging args: the message is only formatted when the
    # DEBUG level is actually enabled (was eagerly formatted with `%`).
    log.debug("\"%s %s %s\" %s %s",
              method, url,
              conn._http_vsn_str,  # pylint: disable-msg=W0212
              httplib_response.status, httplib_response.length)

    return httplib_response
<SYSTEM_TASK:>
Check if the given ``url`` is a member of the same host as this
<END_TASK>
<USER_TASK:>
Description:
def is_same_host(self, url):
    """
    Check if the given ``url`` is a member of the same host as this
    connection pool. Relative URLs (starting with '/') always are.
    """
    # TODO: Add optional support for socket.gethostbyname checking.
    if url.startswith('/'):
        return True
    return get_host(url) == (self.scheme, self.host, self.port)
<SYSTEM_TASK:>
Called to see if a member can be documented by this documenter.
<END_TASK>
<USER_TASK:>
Description:
def can_document_member(cls, member, membername, isattr, parent):
    """Called to see if a member can be documented by this documenter."""
    # Defer to the parent documenter's acceptance checks first ...
    if not super().can_document_member(member, membername, isattr, parent):
        return False
    # ... then only accept coroutine functions.
    return iscoroutinefunction(member)
<SYSTEM_TASK:>
Return all registry items if key is None, otherwise try to fetch the registry key
<END_TASK>
<USER_TASK:>
Description:
def get(context, request, key=None):
    """Return all registry items if key is None, otherwise try to fetch the registry key
    """
    registry_records = api.get_registry_records_by_keyword(key)

    # Prepare batch
    size = req.get_batch_size()
    start = req.get_batch_start()
    batch = api.make_batch(registry_records, size, start)

    return {
        "pagesize": batch.get_pagesize(),
        "next": batch.make_next_url(),
        "previous": batch.make_prev_url(),
        "page": batch.get_pagenumber(),
        "pages": batch.get_numpages(),
        "count": batch.get_sequence_length(),
        # NOTE(review): this wraps the *whole* record list in a new list
        # and ignores the batch slice computed above -- it looks like the
        # current batch page was intended; confirm against API consumers
        # before changing.
        "items": [registry_records],
        "url": api.url_for("senaite.jsonapi.v1.registry", key=key),
    }
<SYSTEM_TASK:>
Merges kwarg dictionaries.
<END_TASK>
<USER_TASK:>
Description:
def merge_kwargs(local_kwarg, default_kwarg):
    """Merge two kwarg dictionaries, with *local_kwarg* taking precedence.

    A local key explicitly set to None removes that key from the result.
    Non-dictionary values (e.g. a timeout number or a string) bypass the
    merge and are returned as-is.
    """
    if default_kwarg is None:
        return local_kwarg

    if isinstance(local_kwarg, basestring):
        return local_kwarg

    if local_kwarg is None:
        return default_kwarg

    # Bypass if not a dictionary (e.g. timeout)
    if not hasattr(default_kwarg, 'items'):
        return local_kwarg

    # Start from the defaults, overlay the local values, then drop any key
    # the caller explicitly disabled with None.
    merged = default_kwarg.copy()
    merged.update(local_kwarg)
    for key, value in local_kwarg.items():
        if value is None:
            del merged[key]
    return merged
<SYSTEM_TASK:>
Runs the complete simulation
<END_TASK>
<USER_TASK:>
Description:
def run_simulation(self):
    """Run every trial of the simulation, printing progress as it goes."""
    print('Starting simulations...')
    for trial_id in range(self.num_trials):
        print('---Trial {}---'.format(trial_id))
        self.run_trial(trial_id)
    print('Simulation completed.')
<SYSTEM_TASK:>
Initializes agents on nodes of graph and registers them to the SimPy environment
<END_TASK>
<USER_TASK:>
Description:
def setup_network_agents(self):
    """Initializes agents on nodes of graph and registers them to the SimPy environment"""
    # Each graph node gets its own agent instance; the initial state is
    # deep-copied so agents never share mutable state between nodes.
    for i in self.env.G.nodes():
        self.env.G.node[i]['agent'] = self.agent_type(environment=self.env, agent_id=i,
                                                      state=deepcopy(self.initial_states[i]))
<SYSTEM_TASK:>
Maximal chordal subgraph of sparsity graph.
<END_TASK>
<USER_TASK:>
Description:
def maxchord(A, ve = None):
    """
    Maximal chordal subgraph of sparsity graph.

    Returns a lower triangular sparse matrix which is the projection
    of :math:`A` on a maximal chordal subgraph and a perfect
    elimination order :math:`p`. Only the
    lower triangular part of :math:`A` is accessed. The
    optional argument `ve` is the index of the last vertex to be
    eliminated (the default value is `n-1`). If :math:`A` is chordal,
    then the matrix returned is equal to :math:`A`.

    :param A: :py:class:`spmatrix`

    :param ve: integer between 0 and `A.size[0]`-1 (optional)

    .. seealso::

        P. M. Dearing, D. R. Shier, D. D. Warner, `Maximal chordal
        subgraphs <http://dx.doi.org/10.1016/0166-218X(88)90075-3>`_,
        Discrete Applied Mathematics, 20:3, 1988, pp. 181-190.
    """
    n = A.size[0]
    assert A.size[1] == n, "A must be a square matrix"
    assert type(A) is spmatrix, "A must be a sparse matrix"
    if ve is None:
        ve = n-1
    else:
        assert type(ve) is int and 0<=ve<n,\
            "ve must be an integer between 0 and A.size[0]-1"
    As = symmetrize(A)
    cp,ri,val = As.CCS  # compressed column storage of the symmetrized A

    # permutation vector
    p = matrix(0,(n,1))

    # weight array
    w = matrix(0,(n,1))
    max_w = 0
    # S[k] holds candidate vertices of weight k; `ve` is placed last in
    # S[0] so it is the first vertex to be numbered (numbering runs from
    # position n-1 down to 0)
    S = [list(range(ve))+list(range(ve+1,n))+[ve]] + [[] for i in range(n-1)]

    # C[v]: numbered vertices adjacent to v in the kept (chordal) subgraph
    C = [set() for i in range(n)]
    E = [[] for i in range(n)]   # edge list
    V = [[] for i in range(n)]   # num. values

    for i in range(n-1,-1,-1):
        # select next node to number: pop from the highest nonempty weight
        # bucket, skipping stale entries of already-numbered nodes
        while True:
            if len(S[max_w]) > 0:
                v = S[max_w].pop()
                if w[v] >= 0: break
            else:
                max_w -= 1
        p[i] = v
        w[v] = -1  # set w[v] = -1 to mark that node v has been numbered

        # loop over unnumbered neighbors of node v
        for ii in range(cp[v],cp[v+1]):
            u = ri[ii]
            d = val[ii]
            if w[u] >= 0:
                # keep edge (u,v) only if it preserves chordality: the
                # kept numbered neighbors of u must be a subset of v's
                if C[u].issubset(C[v]):
                    C[u].update([v])
                    w[u] += 1
                    S[w[u]].append(u)        # bump up u to S[w[u]]
                    max_w = max(max_w,w[u])  # update max deg.
                    E[min(u,v)].append(max(u,v))
                    V[min(u,v)].append(d)
            elif u == v:
                # diagonal entry is always kept
                E[u].append(u)
                V[u].append(d)

    # build adjacency matrix of reordered max. chordal subgraph
    Am = spmatrix([d for d in chain.from_iterable(V)],[i for i in chain.from_iterable(E)],\
        [i for i in chain.from_iterable([len(Ej)*[j] for j,Ej in enumerate(E)])],(n,n))
    return Am,p
<SYSTEM_TASK:>
Main Interface to generate xml documents
<END_TASK>
<USER_TASK:>
Description:
def cli(config, in_file, out_file, verbose):
    """Main Interface to generate xml documents
    from custom dictionaries using legal xsd files
    complying with legal documents in all countires
    around the world.
    """
    # Store the CLI options on the shared config object for subcommands.
    # (config.out_file was previously assigned twice; once is enough.)
    config.in_file = in_file
    config.out_file = out_file
    config.verbose = verbose
<SYSTEM_TASK:>
Format cfdi v3.2 for Mexico.
<END_TASK>
<USER_TASK:>
Description:
def cfdv32mx(config):
    """Format cfdi v3.2 for Mexico.

    \b
    File where the files will be written document.xml.
        cfdicli --in_file /path/to/yout/json/documnt.json cfdv32mx
    \b
    File where the files will be written from document.json.
        cfdicli --out_file ./document.xml cfdv32mx
    """
    # TODO: look for a secure option for eval.
    # Or simply the CLI only should manage json?
    # TODO: Implement json option also.
    # WARNING(review): eval() executes arbitrary code read from the input
    # file -- never run this on untrusted input; json.loads would be the
    # safe replacement once the json option is implemented.
    dict_input = eval(config.in_file.read())
    invoice = cfdv32.get_invoice(dict_input)
    if invoice.valid:
        # Valid invoice: write the generated XML and report the file name
        config.out_file.write(invoice.document)
        config.out_file.flush()
        click.echo('Document %s has been created.' % config.out_file.name)
    else:
        # Invalid invoice: surface the validation error message
        click.echo(invoice.ups.message)
<SYSTEM_TASK:>
Callback function for the pyaudio stream. Don't use directly.
<END_TASK>
<USER_TASK:>
Description:
def get_frame(self, in_data, frame_count, time_info, status):
    """ Callback function for the pyaudio stream. Don't use directly. """
    # Busy-polls the frame queue until a frame arrives or listening stops.
    while self.keep_listening:
        try:
            # NOTE(review): with block=False the timeout argument is
            # ignored by Queue.get -- possibly get(True,
            # timeout=queue_timeout) was intended; confirm before changing.
            frame = self.queue.get(False, timeout=queue_timeout)
            return (frame, pyaudio.paContinue)
        except Empty:
            pass
    # Listening stopped: tell pyaudio the stream is complete.
    return (None, pyaudio.paComplete)
<SYSTEM_TASK:>
Calculate appropriate texture size.
<END_TASK>
<USER_TASK:>
Description:
def calc_scaled_res(self, screen_res, image_res):
    """Scale *image_res* to fill *screen_res* while keeping the movie's
    original aspect ratio.

    Parameters
    ----------
    screen_res : tuple
        Display window size/resolution (width, height).
    image_res : tuple
        Image width and height.

    Returns
    -------
    tuple
        Width and height of the image scaled to the window/screen.
    """
    screen_ratio = screen_res[0] / float(screen_res[1])
    image_ratio = image_res[0] / float(image_res[1])
    if screen_ratio > image_ratio:
        # Screen is wider than the image: fit to height, scale the width
        scaled_width = int(image_res[0] * screen_res[1] / image_res[1])
        return (scaled_width, screen_res[1])
    # Screen is taller (or same ratio): fit to width, scale the height
    scaled_height = int(image_res[1] * screen_res[0] / image_res[0])
    return (screen_res[0], scaled_height)
<SYSTEM_TASK:>
Loads a video.
<END_TASK>
<USER_TASK:>
Description:
def load_media(self, vidSource):
    """ Loads a video.

    Parameters
    ----------
    vidSource : str
        The path to the video file
    """
    # Bail out of the whole application if the file is missing; this is a
    # top-level player method, not a library call.
    if not os.path.exists(vidSource):
        print("File not found: " + vidSource)
        pygame.display.quit()
        pygame.quit()
        sys.exit(1)

    self.decoder.load_media(vidSource)
    self.decoder.loop = self.loop
    pygame.display.set_caption(os.path.split(vidSource)[1])
    # Scale the video to the window while keeping its aspect ratio, and
    # center it in the window.
    self.vidsize = self.decoder.clip.size
    self.destsize = self.calc_scaled_res(self.windowSize, self.vidsize)
    self.vidPos = ((self.windowSize[0] - self.destsize[0]) / 2, (self.windowSize[1] - self.destsize[1]) / 2)
    self.__textureSetup()

    # Pick the requested audio backend; imports are done lazily so the
    # optional sound dependencies are only needed when actually used.
    if(self.decoder.audioformat):
        if self.soundrenderer == "pygame":
            from mediadecoder.soundrenderers import SoundrendererPygame
            self.audio = SoundrendererPygame(self.decoder.audioformat)
        elif self.soundrenderer == "pyaudio":
            from mediadecoder.soundrenderers.pyaudiorenderer import SoundrendererPyAudio
            self.audio = SoundrendererPyAudio(self.decoder.audioformat)
        elif self.soundrenderer == "sounddevice":
            from mediadecoder.soundrenderers.sounddevicerenderer import SoundrendererSounddevice
            self.audio = SoundrendererSounddevice(self.decoder.audioformat)
        self.decoder.set_audiorenderer(self.audio)
<SYSTEM_TASK:>
Update the texture with the newly supplied frame.
<END_TASK>
<USER_TASK:>
Description:
def __texUpdate(self, frame):
    """ Update the texture with the newly supplied frame. """
    # Skip the update while the texture is locked for rendering; otherwise
    # store the buffer and flag that a fresh frame is available.
    if not self.texture_locked:
        self.buffer = frame
        self.texUpdated = True
<SYSTEM_TASK:>
Foreground color formatter function factory.
<END_TASK>
<USER_TASK:>
Description:
def fg(color):
    """
    Foreground color formatter function factory.

    Each returned function casts a unicode string to a colored bytestring
    wrapped in the respective foreground color and foreground reset ANSI
    escape codes. You can also use the ``fg.color`` or ``fg[color]``
    directly as attributes/items.

    The colors are the names of the ``colorama.Fore`` attributes
    (case insensitive). For more information, see:

    https://pypi.python.org/pypi/colorama
    https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
    """
    codes = [getattr(colorama.Fore, color.upper()), colorama.Fore.RESET]
    def colorizer(msg):
        # msg.join on a 2-item list wraps msg: color code + msg + reset
        return msg.join(codes)
    return colorizer
<SYSTEM_TASK:>
Same to ``log``, but this one centralizes the message first.
<END_TASK>
<USER_TASK:>
Description:
def clog(color):
    """Like ``log``, but the message is centralized (and right-stripped)
    before logging."""
    colored_logger = log(color)
    def centralized_logger(msg):
        return colored_logger(centralize(msg).rstrip())
    return centralized_logger
<SYSTEM_TASK:>
Stores the terminal width into ``self.width``, if possible.
<END_TASK>
<USER_TASK:>
Description:
def retrieve_width(self, signum=None, frame=None):
    """
    Store the terminal width into ``self.width``, if possible.

    Tries each configured strategy in order and keeps the first positive
    result. This function is also the SIGWINCH event handler (hence the
    unused signum/frame parameters).
    """
    for method_name, args in self.strategies:
        width = getattr(self, "from_" + method_name)(*args)
        if width and width > 0:
            self.width = width
            break  # Found!
    # Export the (possibly unchanged) width for child processes
    os.environ["COLUMNS"] = str(self.width)
<SYSTEM_TASK:>
'part' may be the hash_key if we are dumping just a few hash_keys - else
<END_TASK>
<USER_TASK:>
Description:
def dump_part(part, total_segments=None):
    """
    'part' may be the hash_key if we are dumping just a few hash_keys - else
    it will be the segment number
    """
    # Runs as a worker (parallel dump): progress is reported through
    # config['queue'] and failures are printed rather than propagated.
    try:
        connection = Connection(host=config['host'], region=config['region'])
        filename = ".".join([config['table_name'], str(part), "dump"])
        if config['compress']:
            # NOTE(review): gzip.GzipFile opened with mode 'w' expects
            # bytes on Python 3 while json.dumps returns str -- confirm
            # this tool targets Python 2 only, or wrap with io.TextIOWrapper.
            opener = gzip.GzipFile
            filename += ".gz"
        else:
            opener = open
        dumper = BatchDumper(connection, config['table_name'], config['capacity'], part, total_segments)
        with opener(filename, 'w') as output:
            while dumper.has_items:
                items = dumper.get_items()
                for item in items:
                    output.write(json.dumps(item))
                    output.write("\n")
                output.flush()
                # Report the number of written items for progress accounting
                config['queue'].put(len(items))
        # Signal this part is finished
        config['queue'].put('complete')
    except Exception as e:
        # Deliberate broad catch: a crashing worker would otherwise die
        # silently inside the pool.
        print('Unhandled exception: {0}'.format(e))
<SYSTEM_TASK:>
Supernodal multifrontal Hessian mapping.
<END_TASK>
<USER_TASK:>
Description:
def hessian(L, Y, U, adj = False, inv = False, factored_updates = False):
    r"""
    Supernodal multifrontal Hessian mapping.

    The Hessian of the log-det barrier at a positive definite chordal
    matrix :math:`X`, applied to a symmetric chordal matrix :math:`U`, is
    :math:`\mathcal H_X(U) = P(X^{-1}UX^{-1})` and factors as
    :math:`\mathcal H_X(U) = \mathcal G_X^{\mathrm{adj}}(\mathcal G_X(U))`.
    This routine evaluates one factor (or both) in place on ``U``:

    - ``adj=False, inv=False`` : :math:`U := \mathcal G_X(U)`
    - ``adj=True,  inv=False`` : :math:`U := \mathcal G_X^{\mathrm{adj}}(U)`
    - ``adj=False, inv=True``  : :math:`U := \mathcal G_X^{-1}(U)`
    - ``adj=True,  inv=True``  : :math:`U := (\mathcal G_X^{\mathrm{adj}})^{-1}(U)`
    - ``adj=None``             : both factors are applied, i.e. the full
      Hessian (``inv=False``) or its inverse (``inv=True``)

    :param L: :py:class:`cspmatrix` (Cholesky factor of :math:`X`)
    :param Y: :py:class:`cspmatrix` (projected inverse of :math:`X`)
    :param U: :py:class:`cspmatrix` or list of :py:class:`cspmatrix`
        objects with the same sparsity pattern as ``L`` and ``Y``;
        modified in place
    :param adj: boolean (or None for both factors)
    :param inv: boolean
    :param factored_updates: boolean; enables (True) or disables (False)
        updating of intermediate factorizations
    """
    assert L.symb == Y.symb, "Symbolic factorization mismatch"
    assert isinstance(L, cspmatrix) and L.is_factor is True, "L must be a cspmatrix factor"
    assert isinstance(Y, cspmatrix) and Y.is_factor is False, "Y must be a cspmatrix"

    # Normalize U to a list of cspmatrix objects with matching symbolics
    if isinstance(U, cspmatrix):
        assert U.is_factor is False,\
            "U must be a cspmatrix or a list of cbsmatrices"
        U = [U]
    else:
        for Ut in U:
            assert Ut.symb == L.symb, "Symbolic factorization mismatch"
            assert isinstance(Ut, cspmatrix) and Ut.is_factor is False,\
                "U must be a cspmatrix or a list of cbsmatrices"

    # Dispatch on (adj, inv): the order of the __Y2K/__scale/__M2T passes
    # (and whether __scale is applied once or twice) defines the mapping;
    # inverse mappings run the passes in the reverse order.
    if adj is False and inv is False:
        __Y2K(L, U, inv = inv)
        __scale(L, Y, U, inv = inv, adj = adj, factored_updates = factored_updates)

    elif adj is True and inv is False:
        __scale(L, Y, U, inv = inv, adj = adj, factored_updates = factored_updates)
        __M2T(L, U, inv = inv)

    elif adj is True and inv is True:
        __M2T(L, U, inv = inv)
        __scale(L, Y, U, inv = inv, adj = adj, factored_updates = factored_updates)

    elif adj is False and inv is True:
        __scale(L, Y, U, inv = inv, adj = adj, factored_updates = factored_updates)
        __Y2K(L, U, inv = inv)

    elif adj is None and inv is False:
        # Full Hessian: G followed by G^adj
        __Y2K(L, U, inv = inv)
        __scale(L, Y, U, inv = inv, adj = False, factored_updates = factored_updates)
        __scale(L, Y, U, inv = inv, adj = True, factored_updates = factored_updates)
        __M2T(L, U, inv = inv)

    elif adj is None and inv is True:
        # Inverse of the full Hessian: passes in reverse order
        __M2T(L, U, inv = inv)
        __scale(L, Y, U, inv = inv, adj = True, factored_updates = factored_updates)
        __scale(L, Y, U, inv = inv, adj = False, factored_updates = factored_updates)
        __Y2K(L, U, inv = inv)

    return
<SYSTEM_TASK:>
The byte size of a single frame of this format.
<END_TASK>
<USER_TASK:>
Description:
def frame_size(self):
    """The byte size of a single frame of this format.

    A frame holds one sample per channel, so the size is the sample
    size in bytes multiplied by the channel count.

    :returns: frame size in bytes
    :raises ValueError: if ``sample_type`` is not a supported type
    """
    if self.sample_type == SampleType.S16NativeEndian:
        # A frame contains one ``sample_size``-byte sample per channel
        # (2 bytes each for signed 16-bit samples).
        return self.sample_size * self.channels
    else:
        # BUGFIX: use %-formatting -- the original passed the value as a
        # second exception argument, so it never appeared in the message.
        raise ValueError('Unknown sample type: %d' % self.sample_type)
<SYSTEM_TASK:>
A class-decorator that creates layout managers with a set of named
<END_TASK>
<USER_TASK:>
Description:
def add_fields(store_name, field_names):
    """
    A class-decorator that creates layout managers with a set of named
    fields.

    Each name in *field_names* becomes a property on the decorated class
    that reads and writes the matching slot of the sequence stored in the
    attribute named *store_name*.
    """
    def decorate(cls):
        def make_property(slot):
            # Bind ``slot`` here so every property targets its own index
            # instead of sharing the loop variable.
            def getter(self):
                return getattr(self, store_name)[slot]

            def setter(self, value):
                getattr(self, store_name)[slot] = value

            return property(getter, setter)

        for slot, field_name in enumerate(field_names):
            setattr(cls, field_name, make_property(slot))
        return cls
    return decorate
<SYSTEM_TASK:>
A utility method to return the minimum size needed to fit
<END_TASK>
<USER_TASK:>
Description:
def _get_smallest_dimensions(self, data):
    """A utility method to return the minimum size needed to fit
    all the elements in.

    :returns: a Point whose x/y are the largest minimum width/height
        over all (truthy) elements.
    """
    width = height = 0
    # Falsy entries (e.g. None placeholders) contribute nothing.
    for element in filter(None, self.elements):
        size = element.get_minimum_size(data)
        if size.x > width:
            width = size.x
        if size.y > height:
            height = size.y
    return datatypes.Point(width, height)
<SYSTEM_TASK:>
Converts the value into a DateTime object before setting.
<END_TASK>
<USER_TASK:>
Description:
def set(self, instance, value, **kw):
    """Converts the value into a DateTime object before setting.

    :param instance: the object holding the field
    :param value: a DateTime-parsable string (falsy values are passed
        through unconverted)
    :param kw: additional keyword parameters passed to the setter
    :returns: False if the value could not be parsed, otherwise the
        result of the underlying setter (None)
    """
    if value:
        try:
            value = DateTime(value)
        except SyntaxError:
            # NOTE(review): this catches the *builtin* SyntaxError; if
            # the DateTime package raises its own SyntaxError subclass
            # of DateTimeError, this handler may miss it -- verify the
            # module's imports.
            # BUGFIX: ``logger.warn`` is a deprecated alias of
            # ``logger.warning``.
            logger.warning("Value '{}' is not a valid DateTime string"
                           .format(value))
            return False
    self._set(instance, value, **kw)
<SYSTEM_TASK:>
Decodes base64 value and set the file object
<END_TASK>
<USER_TASK:>
Description:
def set(self, instance, value, **kw):
    """Decodes base64 value and set the file object

    :param instance: the object holding the file field
    :param value: base64-encoded file payload
    :param kw: additional keyword parameters; ``filename`` is derived
        from ``id``/``title`` when not given
    """
    # Python 2: decode the base64 payload into the raw file bytes.
    value = str(value).decode("base64")
    # handle the filename: fall back to the id or the title when the
    # caller did not provide one explicitly
    if "filename" not in kw:
        # BUGFIX: corrected class name typo in the log message
        # ("FielFieldManager" -> "FileFieldManager").
        logger.debug("FileFieldManager::set: No Filename detected "
                     "-> using title or id")
        kw["filename"] = kw.get("id") or kw.get("title")
    self._set(instance, value, **kw)
<SYSTEM_TASK:>
Set the value of the reference field
<END_TASK>
<USER_TASK:>
Description:
def set(self, instance, value, **kw):  # noqa
    """Set the value of the reference field.

    Accepts a UID, an object, a catalog-query dict, a physical path,
    or a (mixed) list of any of these; each is resolved to an object
    before being stored on the field.

    NOTE(review): the top-level branches are independent ``if``s, so a
    value matching several predicates (e.g. both ``is_uid`` and
    ``is_path``) would be processed more than once -- confirm the
    ``api`` predicates are mutually exclusive.

    :param instance: the object holding the reference field
    :param value: UID / object / dict query / path / list thereof
    :param kw: additional keyword parameters passed to the setter
    :raises ValueError: if multiple values target a single-valued field
    """
    ref = []
    # The value is an UID
    if api.is_uid(value):
        ref.append(api.get_object_by_uid(value))
    # The value is already an object
    if api.is_at_content(value):
        ref.append(value)
    # The value is a dictionary
    # -> handle it like a catalog query
    if u.is_dict(value):
        # NOTE(review): this *rebinds* ``ref`` to the query results,
        # discarding anything appended by the branches above -- verify
        # that is intended.
        results = api.search(portal_type=self.allowed_types, **value)
        ref = map(api.get_object, results)
    # The value is a list
    if u.is_list(value):
        for item in value:
            # uid
            if api.is_uid(item):
                ref.append(api.get_object_by_uid(item))
                continue
            # object
            if api.is_at_content(item):
                ref.append(api.get_object(item))
                continue
            # path
            if api.is_path(item):
                ref.append(api.get_object_by_path(item))
                continue
            # dict (catalog query)
            if u.is_dict(item):
                # If there is UID of objects, just use it.
                uid = item.get('uid', None)
                if uid:
                    obj = api.get_object_by_uid(uid)
                    ref.append(obj)
                else:
                    results = api.search(portal_type=self.allowed_types, **item)
                    objs = map(api.get_object, results)
                    ref.extend(objs)
                continue
            # Plain string
            # -> do a catalog query for title
            if isinstance(item, basestring):
                results = api.search(portal_type=self.allowed_types, title=item)
                objs = map(api.get_object, results)
                ref.extend(objs)
                continue
    # The value is a physical path
    if api.is_path(value):
        ref.append(api.get_object_by_path(value))
    # Handle non multi valued fields
    if not self.multi_valued:
        if len(ref) > 1:
            raise ValueError("Multiple values given for single valued "
                             "field {}".format(repr(self.field)))
        else:
            # NOTE(review): raises IndexError when nothing resolved
            # (empty ``ref``) -- confirm callers never pass unresolvable
            # values, or guard this.
            ref = ref[0]
    return self._set(instance, ref, **kw)
<SYSTEM_TASK:>
Get the proxied field of this field
<END_TASK>
<USER_TASK:>
Description:
def get_proxy_field(self, instance):
    """Get the proxied field of this field.

    Looks up the proxy object for *instance* and returns its field of
    the same name, or None when no proxy object is available.
    """
    proxy = self.get_proxy_object(instance)
    return proxy.getField(self.name) if proxy else None
<SYSTEM_TASK:>
Set Analyses to an AR
<END_TASK>
<USER_TASK:>
Description:
def set(self, instance, value, **kw):
    """
    Set Analyses to an AR
    :param instance: Analysis Request
    :param value: Single AS UID or a list of dictionaries containing AS UIDs
    :param kw: Additional keyword parameters passed to the field
    """
    # normalize a single value into a one-element list
    if not isinstance(value, (list, tuple)):
        value = [value]
    uids = []
    for item in value:
        uid = None
        if isinstance(item, dict):
            uid = item.get("uid")
        # BUGFIX: the original tested ``api.is_uid(value)`` (the whole
        # list) instead of the current ``item``, so plain UID items
        # were never recognized.
        elif api.is_uid(item):
            uid = item
        if uid is None:
            # BUGFIX: message said "Could extract"; also ``warn`` is a
            # deprecated alias of ``warning``.
            logger.warning("Could not extract UID of value")
            continue
        uids.append(uid)
    # resolve the UIDs to objects and store them on the field
    analyses = map(api.get_object_by_uid, uids)
    self._set(instance, analyses, **kw)
<SYSTEM_TASK:>
Set the value of the uid reference field
<END_TASK>
<USER_TASK:>
Description:
def set(self, instance, value, **kw):  # noqa
    """Set the value of the uid reference field.

    Accepts a UID, a dict containing a ``uid`` key, an object, or a
    (mixed) list of UIDs / dicts, and stores the resolved UID(s).

    :param instance: the object holding the reference field
    :param value: UID / dict / object / list thereof
    :param kw: additional keyword parameters passed to the setter
    :raises ValueError: if multiple values target a single-valued field
    """
    ref = []
    # The value is an UID
    if api.is_uid(value):
        ref.append(value)
    # The value is a dictionary, get the UIDs.
    if u.is_dict(value):
        # BUGFIX: ``list.append`` returns None -- the original rebound
        # ``ref`` to None here, breaking every later ``ref.append``.
        ref.append(value.get("uid"))
    # The value is already an object
    if api.is_at_content(value):
        # NOTE(review): this stores the object itself, not its UID --
        # confirm the downstream setter accepts objects as well.
        ref.append(value)
    # The value is a list
    if u.is_list(value):
        for item in value:
            # uid
            if api.is_uid(item):
                ref.append(item)
            # dict (catalog query)
            elif u.is_dict(item):
                # If there is UID of objects, just use it.
                uid = item.get('uid', None)
                if uid:
                    ref.append(uid)
    # Handle non multi valued fields
    if not self.multi_valued:
        if len(ref) > 1:
            raise ValueError("Multiple values given for single valued "
                             "field {}".format(repr(self.field)))
        elif ref:
            ref = ref[0]
        else:
            # ROBUSTNESS: nothing resolved -- store None instead of
            # raising IndexError on ``ref[0]``.
            ref = None
    return self._set(instance, ref, **kw)
<SYSTEM_TASK:>
extract the data of the content and return it as a dictionary
<END_TASK>
<USER_TASK:>
Description:
def to_dict(self):
    """ extract the data of the content and return it as a dictionary
    """
    # start from the schema fields ...
    result = self.extract_fields()
    # ... then merge the custom key/attribute pairs from the mapping
    for key, attr in self.attributes.iteritems():
        # honour the ignore list
        if key in self.ignore:
            continue
        # prefer the attribute of the wrapped context, fall back to an
        # attribute of this adapter itself
        raw = getattr(self.context, attr, None)
        if raw is None:
            raw = getattr(self, attr, None)
        # mapped attributes may be methods -> call them for the value
        if callable(raw):
            raw = raw()
        # convert and store the value under the mapped key
        result[key] = api.to_json_value(self.context, key, raw)
    return result
<SYSTEM_TASK:>
Extract the given fieldnames from the object
<END_TASK>
<USER_TASK:>
Description:
def extract_fields(self):
    """Extract the given fieldnames from the object
    :returns: Schema name/value mapping
    :rtype: dict
    """
    mapping = dict()
    # get the proper data manager for the object
    dm = IDataManager(self.context)
    for name in self.keys:
        # skip names from the ignore list
        if name in self.ignore:
            continue
        try:
            # get the field value with the data manager
            value = dm.json_data(name)
        except Unauthorized:
            # https://github.com/collective/plone.jsonapi.routes/issues/52
            # -> skip restricted fields
            logger.debug("Skipping restricted field '%s'" % name)
            continue
        except ValueError:
            logger.debug("Skipping invalid field '%s'" % name)
            continue
        mapping[name] = api.to_json_value(self.context, name, value)
    return mapping
<SYSTEM_TASK:>
Generate the physical path
<END_TASK>
<USER_TASK:>
Description:
def _x_get_physical_path(self):
    """Generate the physical path

    Prefixes the brain's path with the portal path unless the portal
    path already occurs in it.
    """
    brain_path = self.context.getPath()
    portal_path = api.get_path(api.get_portal())
    # NOTE(review): this is a substring check, not a prefix check --
    # confirm that is intended.
    if portal_path in brain_path:
        return brain_path
    return "{}/{}".format(portal_path, brain_path)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.