text_prompt | code_prompt |
---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def crop(self, data):
'''Crop a data dictionary down to its common time
Parameters
----------
data : dict
As produced by pumpp.transform
Returns
-------
data_cropped : dict
Like `data` but with all time-like axes truncated to the
minimum common duration
'''
duration = self.data_duration(data)
data_out = dict()
for key in data:
idx = [slice(None)] * data[key].ndim
for tdim in self._time.get(key, []):
idx[tdim] = slice(duration)
data_out[key] = data[key][tuple(idx)]
return data_out |
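The slicing idiom above generalizes to arrays whose time axis sits in any position. A minimal standalone sketch of the same truncation, with a hypothetical time-axis map standing in for `self._time`:

import numpy as np

data = {'cqt/mag': np.zeros((100, 84)), 'chord/chord': np.zeros((98, 26))}
time_axes = {'cqt/mag': [0], 'chord/chord': [0]}    # hypothetical stand-in for self._time
duration = min(v.shape[0] for v in data.values())   # common time length: 98

cropped = {}
for key, value in data.items():
    idx = [slice(None)] * value.ndim
    for tdim in time_axes.get(key, []):
        idx[tdim] = slice(duration)
    cropped[key] = value[tuple(idx)]
# cropped['cqt/mag'].shape == (98, 84)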
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform_audio(self, y):
'''Compute the Mel spectrogram
Parameters
----------
y : np.ndarray
The audio buffer
Returns
-------
data : dict
data['mag'] : np.ndarray, shape=(n_frames, n_mels)
The Mel spectrogram
'''
n_frames = self.n_frames(get_duration(y=y, sr=self.sr))
mel = np.sqrt(melspectrogram(y=y, sr=self.sr,
n_fft=self.n_fft,
hop_length=self.hop_length,
n_mels=self.n_mels,
fmax=self.fmax)).astype(np.float32)
mel = fix_length(mel, n_frames)
if self.log:
mel = amplitude_to_db(mel, ref=np.max)
return {'mag': mel.T[self.idx]} |
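For reference, a standalone sketch of the same magnitude computation using librosa directly, on a synthetic buffer; the parameter values here (n_fft, hop_length, n_mels) are illustrative, not the extractor's defaults:

import numpy as np
import librosa

sr = 22050
y = np.random.randn(2 * sr).astype(np.float32)            # any mono buffer works
S = np.sqrt(librosa.feature.melspectrogram(y=y, sr=sr,
                                           n_fft=2048,
                                           hop_length=512,
                                           n_mels=128))
S_db = librosa.amplitude_to_db(S, ref=np.max)             # the log=True branch above
print(S.T.shape)                                          # (n_frames, n_mels), as in data['mag']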
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def empty(self, duration):
'''Empty vector annotations.
This returns an annotation with a single observation
vector consisting of all-zeroes.
Parameters
----------
duration : number >0
Length of the track
Returns
-------
ann : jams.Annotation
The empty annotation
'''
ann = super(VectorTransformer, self).empty(duration)
ann.append(time=0, duration=duration, confidence=0,
value=np.zeros(self.dimension, dtype=np.float32))
return ann |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform_annotation(self, ann, duration):
'''Apply the vector transformation.
Parameters
----------
ann : jams.Annotation
The input annotation
duration : number > 0
The duration of the track
Returns
-------
data : dict
data['vector'] : np.ndarray, shape=(dimension,)
Raises
------
DataError
If the input dimension does not match
'''
_, values = ann.to_interval_values()
vector = np.asarray(values[0], dtype=self.dtype)
if len(vector) != self.dimension:
raise DataError('vector dimension({}) '
'!= self.dimension({})'
.format(len(vector), self.dimension))
return {'vector': vector} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def inverse(self, vector, duration=None):
'''Inverse vector transformer'''
ann = jams.Annotation(namespace=self.namespace, duration=duration)
if duration is None:
duration = 0
ann.append(time=0, duration=duration, value=vector)
return ann |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def empty(self, duration):
'''Empty label annotations.
Constructs a single observation with an empty value (None).
Parameters
----------
duration : number > 0
The duration of the annotation
'''
ann = super(DynamicLabelTransformer, self).empty(duration)
ann.append(time=0, duration=duration, value=None)
return ann |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform_annotation(self, ann, duration):
'''Transform an annotation to dynamic label encoding.
Parameters
----------
ann : jams.Annotation
The annotation to convert
duration : number > 0
The duration of the track
Returns
-------
data : dict
data['tags'] : np.ndarray, shape=(n, n_labels)
A time-varying binary encoding of the labels
'''
intervals, values = ann.to_interval_values()
# Suppress all intervals not in the encoder
tags = []
for v in values:
if v in self._classes:
tags.extend(self.encoder.transform([[v]]))
else:
tags.extend(self.encoder.transform([[]]))
tags = np.asarray(tags)
target = self.encode_intervals(duration, intervals, tags)
return {'tags': target} |
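The suppression step above maps out-of-vocabulary labels to an all-zero row rather than raising. A standalone sketch of that idiom, assuming scikit-learn's MultiLabelBinarizer as a stand-in for `self.encoder`:

from sklearn.preprocessing import MultiLabelBinarizer

encoder = MultiLabelBinarizer()
encoder.fit([['jazz', 'pop', 'rock']])
known = set(encoder.classes_)

values = ['rock', 'disco']                   # 'disco' is outside the vocabulary
tags = [encoder.transform([[v]])[0] if v in known
        else encoder.transform([[]])[0]
        for v in values]
# tags[0] -> [0, 0, 1]; tags[1] -> [0, 0, 0]  (unknown label suppressed, not an error)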
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform_annotation(self, ann, duration):
'''Transform an annotation to static label encoding.
Parameters
----------
ann : jams.Annotation
The annotation to convert
duration : number > 0
The duration of the track
Returns
-------
data : dict
data['tags'] : np.ndarray, shape=(n_labels,)
A static binary encoding of the labels
'''
intervals = np.asarray([[0, 1]])
values = list([obs.value for obs in ann])
intervals = np.tile(intervals, [len(values), 1])
# Suppress all intervals not in the encoder
tags = [v for v in values if v in self._classes]
if len(tags):
target = self.encoder.transform([tags]).astype(bool).max(axis=0)
else:
target = np.zeros(len(self._classes), dtype=bool)
return {'tags': target} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def inverse(self, encoded, duration=None):
'''Inverse static tag transformation'''
ann = jams.Annotation(namespace=self.namespace, duration=duration)
if np.isrealobj(encoded):
detected = (encoded >= 0.5)
else:
detected = encoded
for vd in self.encoder.inverse_transform(np.atleast_2d(detected))[0]:
vid = np.flatnonzero(self.encoder.transform(np.atleast_2d(vd)))
ann.append(time=0,
duration=duration,
value=vd,
confidence=encoded[vid])
return ann |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform_audio(self, y):
'''Compute the time position encoding
Parameters
----------
y : np.ndarray
Audio buffer
Returns
-------
data : dict
data['relative'] = np.ndarray, shape=(n_frames, 2)
data['absolute'] = np.ndarray, shape=(n_frames, 2)
Relative and absolute time positional encodings.
'''
duration = get_duration(y=y, sr=self.sr)
n_frames = self.n_frames(duration)
relative = np.zeros((n_frames, 2), dtype=np.float32)
relative[:, 0] = np.cos(np.pi * np.linspace(0, 1, num=n_frames))
relative[:, 1] = np.sin(np.pi * np.linspace(0, 1, num=n_frames))
absolute = relative * np.sqrt(duration)
return {'relative': relative[self.idx],
'absolute': absolute[self.idx]} |
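A quick numeric sketch of the encoding above: the relative channels trace a half cosine/sine arc over the track, and the absolute channels scale that arc by the square root of the duration.

import numpy as np

n_frames, duration = 5, 10.0
t = np.linspace(0, 1, num=n_frames)
relative = np.stack([np.cos(np.pi * t), np.sin(np.pi * t)], axis=1)
absolute = relative * np.sqrt(duration)
# relative[0] == [1, 0] at the start of the track; relative[-1] == [-1, 0] at the end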
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def add(self, operator):
'''Add an operation to this pump.
Parameters
----------
operator : BaseTaskTransformer, FeatureExtractor
The operation to add
Raises
------
ParameterError
if `operator` is not of a correct type
'''
if not isinstance(operator, (BaseTaskTransformer, FeatureExtractor)):
raise ParameterError('operator={} must be one of '
'(BaseTaskTransformer, FeatureExtractor)'
.format(operator))
if operator.name in self.opmap:
raise ParameterError('Duplicate operator name detected: '
'{}'.format(operator))
super(Pump, self).add(operator)
self.opmap[operator.name] = operator
self.ops.append(operator) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform(self, audio_f=None, jam=None, y=None, sr=None, crop=False):
'''Apply the transformations to an audio file, and optionally JAMS object.
Parameters
----------
audio_f : str
Path to audio file
jam : optional, `jams.JAMS`, str or file-like
Optional JAMS object/path to JAMS file/open file descriptor.
If provided, this will provide data for task transformers.
y : np.ndarray
sr : number > 0
If provided, operate directly on an existing audio buffer `y` at
sampling rate `sr` rather than load from `audio_f`.
crop : bool
If `True`, then data are cropped to a common time index across all
fields. Otherwise, data may have different time extents.
Returns
-------
data : dict
Data dictionary containing the transformed audio (and annotations)
Raises
------
ParameterError
At least one of `audio_f` or `(y, sr)` must be provided.
'''
if y is None:
if audio_f is None:
raise ParameterError('At least one of `y` or `audio_f` '
'must be provided')
# Load the audio
y, sr = librosa.load(audio_f, sr=sr, mono=True)
if sr is None:
raise ParameterError('If audio is provided as `y`, you must '
'specify the sampling rate as sr=')
if jam is None:
jam = jams.JAMS()
jam.file_metadata.duration = librosa.get_duration(y=y, sr=sr)
# Load the jams
if not isinstance(jam, jams.JAMS):
jam = jams.load(jam)
data = dict()
for operator in self.ops:
if isinstance(operator, BaseTaskTransformer):
data.update(operator.transform(jam))
elif isinstance(operator, FeatureExtractor):
data.update(operator.transform(y, sr))
if crop:
data = self.crop(data)
return data |
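A hedged usage sketch of the call above; `pump` is assumed to be a Pump already populated with feature and task operators, and the file names are placeholders:

# From a file on disk, with an accompanying JAMS annotation:
data = pump.transform(audio_f='song.ogg', jam='song.jams', crop=True)

# Or from an in-memory buffer (the sampling rate must then be given explicitly):
# data = pump.transform(y=y, sr=44100)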
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def sampler(self, n_samples, duration, random_state=None):
'''Construct a sampler object for this pump's operators.
Parameters
----------
n_samples : None or int > 0
The number of samples to generate
duration : int > 0
The duration (in frames) of each sample patch
random_state : None, int, or np.random.RandomState
If int, random_state is the seed used by the random number
generator;
If RandomState instance, random_state is the random number
generator;
If None, the random number generator is the RandomState instance
used by np.random.
Returns
-------
sampler : pumpp.Sampler
The sampler object
See Also
--------
pumpp.sampler.Sampler
'''
return Sampler(n_samples, duration,
random_state=random_state,
*self.ops) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def fields(self):
'''A dictionary of fields constructed by this pump'''
out = dict()
for operator in self.ops:
out.update(**operator.fields)
return out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def layers(self):
'''Construct Keras input layers for all feature transformers
in the pump.
Returns
-------
layers : {field: keras.layers.Input}
A dictionary of keras input layers, keyed by the corresponding
fields.
'''
layermap = dict()
for operator in self.ops:
if hasattr(operator, 'layers'):
layermap.update(operator.layers())
return layermap |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def set_transition_beat(self, p_self):
'''Set the beat-tracking transition matrix according to
self-loop probabilities.
Parameters
----------
p_self : None, float in (0, 1), or np.ndarray [shape=(2,)]
Optional self-loop probability(ies), used for Viterbi decoding
'''
if p_self is None:
self.beat_transition = None
else:
self.beat_transition = transition_loop(2, p_self) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def set_transition_down(self, p_self):
'''Set the downbeat-tracking transition matrix according to
self-loop probabilities.
Parameters
----------
p_self : None, float in (0, 1), or np.ndarray [shape=(2,)]
Optional self-loop probability(ies), used for Viterbi decoding
'''
if p_self is None:
self.down_transition = None
else:
self.down_transition = transition_loop(2, p_self) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform_annotation(self, ann, duration):
'''Apply the beat transformer
Parameters
----------
ann : jams.Annotation
The input annotation
duration : number > 0
The duration of the audio
Returns
-------
data : dict
data['beat'] : np.ndarray, shape=(n, 1)
Binary indicator of beat/non-beat
data['downbeat'] : np.ndarray, shape=(n, 1)
Binary indicator of downbeat/non-downbeat
mask_downbeat : bool
True if downbeat annotations are present
'''
mask_downbeat = False
intervals, values = ann.to_interval_values()
values = np.asarray(values)
beat_events = intervals[:, 0]
beat_labels = np.ones((len(beat_events), 1))
idx = (values == 1)
if np.any(idx):
downbeat_events = beat_events[idx]
downbeat_labels = np.ones((len(downbeat_events), 1))
mask_downbeat = True
else:
downbeat_events = np.zeros(0)
downbeat_labels = np.zeros((0, 1))
target_beat = self.encode_events(duration,
beat_events,
beat_labels)
target_downbeat = self.encode_events(duration,
downbeat_events,
downbeat_labels)
return {'beat': target_beat,
'downbeat': target_downbeat,
'mask_downbeat': mask_downbeat} |
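A small sketch of the expected input, assuming the jams `beat` namespace in which downbeats carry the value 1:

import numpy as np
import jams

ann = jams.Annotation(namespace='beat', duration=4.0)
for t, pos in zip([0.5, 1.0, 1.5, 2.0], [1, 2, 3, 4]):
    ann.append(time=t, duration=0, value=pos)

intervals, values = ann.to_interval_values()
beat_events = intervals[:, 0]                        # beat times in seconds
downbeats = beat_events[np.asarray(values) == 1]     # -> array([0.5])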
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def inverse(self, encoded, downbeat=None, duration=None):
'''Inverse transformation for beats and optional downbeats'''
ann = jams.Annotation(namespace=self.namespace, duration=duration)
beat_times = np.asarray([t for t, _ in self.decode_events(encoded,
transition=self.beat_transition,
p_init=self.beat_p_init,
p_state=self.beat_p_state) if _])
beat_frames = time_to_frames(beat_times,
sr=self.sr,
hop_length=self.hop_length)
if downbeat is not None:
downbeat_times = set([t for t, _ in self.decode_events(downbeat,
transition=self.down_transition,
p_init=self.down_p_init,
p_state=self.down_p_state) if _])
pickup_beats = len([t for t in beat_times
if t < min(downbeat_times)])
else:
downbeat_times = set()
pickup_beats = 0
value = - pickup_beats - 1
for beat_t, beat_f in zip(beat_times, beat_frames):
if beat_t in downbeat_times:
value = 1
else:
value += 1
confidence = encoded[beat_f]
ann.append(time=beat_t,
duration=0,
value=value,
confidence=confidence)
return ann |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform_annotation(self, ann, duration):
'''Transform an annotation to the beat-position encoding
Parameters
----------
ann : jams.Annotation
The annotation to convert
duration : number > 0
The duration of the track
Returns
-------
data : dict
data['position'] : np.ndarray, shape=(n, n_labels) or (n, 1)
A time-varying label encoding of beat position
'''
# 1. get all the events
# 2. find all the downbeats
# 3. map each downbeat to a subdivision counter
# number of beats until the next downbeat
# 4. pad out events to intervals
# 5. encode each beat interval to its position
boundaries, values = ann.to_interval_values()
# Convert to intervals and span the duration
# padding at the end of track does not propagate the right label
# this is an artifact of inferring end-of-track from boundaries though
boundaries = list(boundaries[:, 0])
if boundaries and boundaries[-1] < duration:
boundaries.append(duration)
intervals = boundaries_to_intervals(boundaries)
intervals, values = adjust_intervals(intervals, values,
t_min=0,
t_max=duration,
start_label=0,
end_label=0)
values = np.asarray(values, dtype=int)
downbeats = np.flatnonzero(values == 1)
position = []
for i, v in enumerate(values):
# If the value is a 0, mark it as X and move on
if v == 0:
position.extend(self.encoder.transform(['X']))
continue
# Otherwise, let's try to find the surrounding downbeats
prev_idx = np.searchsorted(downbeats, i, side='right') - 1
next_idx = 1 + prev_idx
if prev_idx >= 0 and next_idx < len(downbeats):
# In this case, the subdivision is well-defined
subdivision = downbeats[next_idx] - downbeats[prev_idx]
elif prev_idx < 0 and next_idx < len(downbeats):
subdivision = np.max(values[:downbeats[0]+1])
elif next_idx >= len(downbeats):
subdivision = len(values) - downbeats[prev_idx]
if subdivision > self.max_divisions or subdivision < 1:
position.extend(self.encoder.transform(['X']))
else:
position.extend(self.encoder.transform(['{:02d}/{:02d}'.format(subdivision, v)]))
dtype = self.fields[self.scope('position')].dtype
position = np.asarray(position)
if self.sparse:
position = position[:, np.newaxis]
target = self.encode_intervals(duration, intervals, position,
multi=False, dtype=dtype)
return {'position': target} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform_audio(self, y):
'''Compute the tempogram
Parameters
----------
y : np.ndarray
Audio buffer
Returns
-------
data : dict
data['tempogram'] : np.ndarray, shape=(n_frames, win_length)
The tempogram
'''
n_frames = self.n_frames(get_duration(y=y, sr=self.sr))
tgram = tempogram(y=y, sr=self.sr,
hop_length=self.hop_length,
win_length=self.win_length).astype(np.float32)
tgram = fix_length(tgram, n_frames)
return {'tempogram': tgram.T[self.idx]} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform_audio(self, y):
'''Apply the scale transform to the tempogram
Parameters
----------
y : np.ndarray
The audio buffer
Returns
-------
data : dict
data['temposcale'] : np.ndarray, shape=(n_frames, n_fmt)
The scale transform magnitude coefficients
'''
data = super(TempoScale, self).transform_audio(y)
data['temposcale'] = np.abs(fmt(data.pop('tempogram'),
axis=1,
n_fmt=self.n_fmt)).astype(np.float32)[self.idx]
return data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform_annotation(self, ann, duration):
'''Apply the structure agreement transformation.
Parameters
----------
ann : jams.Annotation
The segment annotation
duration : number > 0
The target duration
Returns
-------
data : dict
data['agree'] : np.ndarray, shape=(n, n), dtype=bool
'''
intervals, values = ann.to_interval_values()
intervals, values = adjust_intervals(intervals, values,
t_min=0, t_max=duration)
# Re-index the labels
ids, _ = index_labels(values)
rate = float(self.hop_length) / self.sr
# Sample segment labels on our frame grid
_, labels = intervals_to_samples(intervals, ids, sample_size=rate)
# Make the agreement matrix
return {'agree': np.equal.outer(labels, labels)} |
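A tiny numeric sketch of the agreement-matrix construction: two frames agree exactly when they carry the same (re-indexed) segment label.

import numpy as np

labels = np.array([0, 0, 1, 1, 0])        # frame-wise segment ids
agree = np.equal.outer(labels, labels)    # boolean matrix, shape (5, 5)
# agree[0, 4] is True (both label 0); agree[0, 2] is False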
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform_audio(self, y):
'''Compute the STFT magnitude and phase.
Parameters
----------
y : np.ndarray
The audio buffer
Returns
-------
data : dict
data['mag'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
STFT magnitude
data['phase'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
STFT phase
'''
n_frames = self.n_frames(get_duration(y=y, sr=self.sr))
D = stft(y, hop_length=self.hop_length,
n_fft=self.n_fft)
D = fix_length(D, n_frames)
mag, phase = magphase(D)
if self.log:
mag = amplitude_to_db(mag, ref=np.max)
return {'mag': mag.T[self.idx].astype(np.float32),
'phase': np.angle(phase.T)[self.idx].astype(np.float32)} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform_audio(self, y):
'''Compute the STFT with phase differentials.
Parameters
----------
y : np.ndarray
the audio buffer
Returns
-------
data : dict
data['mag'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
The STFT magnitude
data['dphase'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
The unwrapped phase differential
'''
data = super(STFTPhaseDiff, self).transform_audio(y)
data['dphase'] = self.phase_diff(data.pop('phase'))
return data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _pad_nochord(target, axis=-1):
'''Pad a chord annotation with no-chord flags.
Parameters
----------
target : np.ndarray
the input data
axis : int
the axis along which to pad
Returns
-------
target_pad
`target` expanded by 1 along the specified `axis`.
The expanded dimension will be 0 when `target` is non-zero
before padding, and 1 otherwise.
'''
ncmask = ~np.max(target, axis=axis, keepdims=True)
return np.concatenate([target, ncmask], axis=axis) |
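A small numeric sketch of the padding: frames with no active chord get a 1 in the appended no-chord channel.

import numpy as np

target = np.array([[1, 0, 0],
                   [0, 0, 0]], dtype=bool)     # second frame has no active chord
ncmask = ~np.max(target, axis=-1, keepdims=True)
padded = np.concatenate([target, ncmask], axis=-1)
# padded -> [[1, 0, 0, 0],
#            [0, 0, 0, 1]]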
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def empty(self, duration):
'''Empty chord annotations
Parameters
----------
duration : number
The length (in seconds) of the empty annotation
Returns
-------
ann : jams.Annotation
A chord annotation consisting of a single `no-chord` observation.
'''
ann = super(ChordTransformer, self).empty(duration)
ann.append(time=0,
duration=duration,
value='N', confidence=0)
return ann |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def simplify(self, chord):
'''Simplify a chord string down to the vocabulary space'''
# Drop inversions
chord = re.sub(r'/.*$', r'', chord)
# Drop any additional or suppressed tones
chord = re.sub(r'\(.*?\)', r'', chord)
# Drop dangling : indicators
chord = re.sub(r':$', r'', chord)
# Encode the chord
root, pitches, _ = mir_eval.chord.encode(chord)
# Build the query
# To map the binary vector pitches down to bit masked integer,
# we just dot against powers of 2
P = 2**np.arange(12, dtype=int)
query = self.mask_ & pitches[::-1].dot(P)
if root < 0 and chord[0].upper() == 'N':
return 'N'
if query not in QUALITIES:
return 'X'
return '{}:{}'.format(PITCHES[root], QUALITIES[query]) |
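A standalone sketch of the bitmask idiom used in the query construction above, built on mir_eval's chord encoder; `QUALITIES`, `PITCHES`, and `self.mask_` are lookup tables defined elsewhere in the module and are not reproduced here.

import numpy as np
import mir_eval

root, pitches, _ = mir_eval.chord.encode('C:maj')   # root=0, 12-d pitch-class bitmap
P = 2 ** np.arange(12, dtype=int)
query = pitches[::-1].dot(P)                        # pitch classes packed into one integer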
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform_annotation(self, ann, duration):
'''Transform an annotation to chord-tag encoding
Parameters
----------
ann : jams.Annotation
The annotation to convert
duration : number > 0
The duration of the track
Returns
-------
data : dict
data['chord'] : np.ndarray, shape=(n, n_labels)
A time-varying binary encoding of the chords
'''
intervals, values = ann.to_interval_values()
chords = []
for v in values:
chords.extend(self.encoder.transform([self.simplify(v)]))
dtype = self.fields[self.scope('chord')].dtype
chords = np.asarray(chords)
if self.sparse:
chords = chords[:, np.newaxis]
target = self.encode_intervals(duration, intervals, chords,
multi=False, dtype=dtype)
return {'chord': target} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform_audio(self, y):
'''Compute the CQT
Parameters
----------
y : np.ndarray
The audio buffer
Returns
-------
data : dict
data['mag'] : np.ndarray, shape = (n_frames, n_bins)
The CQT magnitude
data['phase']: np.ndarray, shape = mag.shape
The CQT phase
'''
n_frames = self.n_frames(get_duration(y=y, sr=self.sr))
C = cqt(y=y, sr=self.sr, hop_length=self.hop_length,
fmin=self.fmin,
n_bins=(self.n_octaves * self.over_sample * 12),
bins_per_octave=(self.over_sample * 12))
C = fix_length(C, n_frames)
cqtm, phase = magphase(C)
if self.log:
cqtm = amplitude_to_db(cqtm, ref=np.max)
return {'mag': cqtm.T.astype(np.float32)[self.idx],
'phase': np.angle(phase).T.astype(np.float32)[self.idx]} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform_audio(self, y):
'''Compute CQT magnitude.
Parameters
----------
y : np.ndarray
the audio buffer
Returns
-------
data : dict
data['mag'] : np.ndarray, shape=(n_frames, n_bins)
The CQT magnitude
'''
data = super(CQTMag, self).transform_audio(y)
data.pop('phase')
return data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform_audio(self, y):
'''Compute the CQT with unwrapped phase
Parameters
----------
y : np.ndarray
The audio buffer
Returns
-------
data : dict
data['mag'] : np.ndarray, shape=(n_frames, n_bins)
CQT magnitude
data['dphase'] : np.ndarray, shape=(n_frames, n_bins)
Unwrapped phase differential
'''
data = super(CQTPhaseDiff, self).transform_audio(y)
data['dphase'] = self.phase_diff(data.pop('phase'))
return data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform_audio(self, y):
'''Compute the HCQT
Parameters
----------
y : np.ndarray
The audio buffer
Returns
-------
data : dict
data['mag'] : np.ndarray, shape = (n_frames, n_bins, n_harmonics)
The CQT magnitude
data['phase']: np.ndarray, shape = mag.shape
The CQT phase
'''
cqtm, phase = [], []
n_frames = self.n_frames(get_duration(y=y, sr=self.sr))
for h in self.harmonics:
C = cqt(y=y, sr=self.sr, hop_length=self.hop_length,
fmin=self.fmin * h,
n_bins=(self.n_octaves * self.over_sample * 12),
bins_per_octave=(self.over_sample * 12))
C = fix_length(C, n_frames)
C, P = magphase(C)
if self.log:
C = amplitude_to_db(C, ref=np.max)
cqtm.append(C)
phase.append(P)
cqtm = np.asarray(cqtm).astype(np.float32)
phase = np.angle(np.asarray(phase)).astype(np.float32)
return {'mag': self._index(cqtm),
'phase': self._index(phase)} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _index(self, value):
'''Rearrange a tensor according to the convolution mode
Input is assumed to be in (channels, bins, time) format.
'''
if self.conv in ('channels_last', 'tf'):
return np.transpose(value, (2, 1, 0))
else: # self.conv in ('channels_first', 'th')
return np.transpose(value, (0, 2, 1)) |
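A shape-only sketch of the two layouts; the input is assumed to be (channels, bins, time) as the docstring states.

import numpy as np

x = np.zeros((5, 36, 100))                    # (harmonics, bins, frames)
channels_last = np.transpose(x, (2, 1, 0))    # (100, 36, 5) for 'channels_last' / 'tf'
channels_first = np.transpose(x, (0, 2, 1))   # (5, 100, 36) for 'channels_first' / 'th'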
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform_audio(self, y):
'''Compute HCQT magnitude.
Parameters
----------
y : np.ndarray
the audio buffer
Returns
-------
data : dict
data['mag'] : np.ndarray, shape=(n_frames, n_bins)
The CQT magnitude
'''
data = super(HCQTMag, self).transform_audio(y)
data.pop('phase')
return data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform_audio(self, y):
'''Compute the HCQT with unwrapped phase
Parameters
----------
y : np.ndarray
The audio buffer
Returns
-------
data : dict
data['mag'] : np.ndarray, shape=(n_frames, n_bins)
CQT magnitude
data['dphase'] : np.ndarray, shape=(n_frames, n_bins)
Unwrapped phase differential
'''
data = super(HCQTPhaseDiff, self).transform_audio(y)
data['dphase'] = self.phase_diff(data.pop('phase'))
return data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def fill_value(dtype):
'''Get a fill-value for a given dtype
Parameters
----------
dtype : type
Returns
-------
`np.nan` if `dtype` is real or complex
0 otherwise
'''
if np.issubdtype(dtype, np.floating) or np.issubdtype(dtype, np.complexfloating):
return dtype(np.nan)
return dtype(0) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def empty(self, duration):
'''Create an empty jams.Annotation for this task.
This method should be overridden by derived classes.
Parameters
----------
duration : int >= 0
Duration of the annotation
'''
return jams.Annotation(namespace=self.namespace, time=0, duration=0) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform(self, jam, query=None):
'''Transform jam object to make data for this task
Parameters
----------
jam : jams.JAMS
The jams container object
query : string, dict, or callable [optional]
An optional query to narrow the elements of `jam.annotations`
to be considered.
If not provided, all annotations are considered.
Returns
-------
data : dict
A dictionary of transformed annotations.
All annotations which can be converted to the target namespace
will be converted.
'''
anns = []
if query:
results = jam.search(**query)
else:
results = jam.annotations
# Find annotations that can be coerced to our target namespace
for ann in results:
try:
anns.append(jams.nsconvert.convert(ann, self.namespace))
except jams.NamespaceError:
pass
duration = jam.file_metadata.duration
# If none, make a fake one
if not anns:
anns = [self.empty(duration)]
# Apply transformations
results = []
for ann in anns:
results.append(self.transform_annotation(ann, duration))
# If the annotation range is None, it spans the entire track
if ann.time is None or ann.duration is None:
valid = [0, duration]
else:
valid = [ann.time, ann.time + ann.duration]
results[-1]['_valid'] = time_to_frames(valid, sr=self.sr,
hop_length=self.hop_length)
# Prefix and collect
return self.merge(results) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def encode_events(self, duration, events, values, dtype=bool):
'''Encode labeled events as a time-series matrix.
Parameters
----------
duration : number
The duration of the track
events : ndarray, shape=(n,)
Time index of the events
values : ndarray, shape=(n, m)
Values array. Must have the same first index as `events`.
dtype : numpy data type
Returns
-------
target : ndarray, shape=(n_frames, n_values)
'''
frames = time_to_frames(events, sr=self.sr,
hop_length=self.hop_length)
n_total = int(time_to_frames(duration, sr=self.sr,
hop_length=self.hop_length))
n_alloc = n_total
if np.any(frames):
n_alloc = max(n_total, 1 + int(frames.max()))
target = np.empty((n_alloc, values.shape[1]),
dtype=dtype)
target.fill(fill_value(dtype))
values = values.astype(dtype)
for column, event in zip(values, frames):
target[event] += column
return target[:n_total] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def encode_intervals(self, duration, intervals, values, dtype=bool,
multi=True, fill=None):
'''Encode labeled intervals as a time-series matrix.
Parameters
----------
duration : number
The duration (in seconds) of the track
intervals : np.ndarray, shape=(n, 2)
The list of intervals
values : np.ndarray, shape=(n, m)
The (encoded) values corresponding to each interval
dtype : np.dtype
The desired output type
multi : bool
If `True`, allow multiple labels per interval.
fill : dtype (optional)
Optional default fill value for missing data.
If not provided, the default is inferred from `dtype`.
Returns
-------
target : np.ndarray, shape=(duration * sr / hop_length, m)
The labeled interval encoding, sampled at the desired frame rate
'''
if fill is None:
fill = fill_value(dtype)
frames = time_to_frames(intervals, sr=self.sr,
hop_length=self.hop_length)
n_total = int(time_to_frames(duration, sr=self.sr,
hop_length=self.hop_length))
values = values.astype(dtype)
n_alloc = n_total
if np.any(frames):
n_alloc = max(n_total, 1 + int(frames.max()))
target = np.empty((n_alloc, values.shape[1]),
dtype=dtype)
target.fill(fill)
for column, interval in zip(values, frames):
if multi:
target[interval[0]:interval[1]] += column
else:
target[interval[0]:interval[1]] = column
return target[:n_total] |
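A standalone sketch of the interval-encoding loop, with sr and hop_length chosen arbitrarily; it mirrors the `multi=False` branch above.

import numpy as np
from librosa import time_to_frames

sr, hop_length = 22050, 512
intervals = np.array([[0.0, 1.0], [1.0, 2.0]])        # two one-second intervals
values = np.array([[1, 0], [0, 1]], dtype=bool)       # one-hot label per interval

frames = time_to_frames(intervals, sr=sr, hop_length=hop_length)
n_total = int(time_to_frames(2.0, sr=sr, hop_length=hop_length))
target = np.zeros((n_total, 2), dtype=bool)
for column, (start, end) in zip(values, frames):
    target[start:end] = column
# target has shape (86, 2): one label vector per frame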
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def transform(self, y, sr):
'''Transform an audio signal
Parameters
----------
y : np.ndarray
The audio signal
sr : number > 0
The native sampling rate of y
Returns
-------
dict
Data dictionary containing features extracted from y
See Also
--------
transform_audio
'''
if sr != self.sr:
y = resample(y, sr, self.sr)
return self.merge([self.transform_audio(y)]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def phase_diff(self, phase):
'''Compute the phase differential along a given axis
Parameters
----------
phase : np.ndarray
Input phase (in radians)
Returns
-------
dphase : np.ndarray like `phase`
The phase differential.
'''
if self.conv is None:
axis = 0
elif self.conv in ('channels_last', 'tf'):
axis = 0
elif self.conv in ('channels_first', 'th'):
axis = 1
# Compute the phase differential
dphase = np.empty(phase.shape, dtype=phase.dtype)
zero_idx = [slice(None)] * phase.ndim
zero_idx[axis] = slice(1)
else_idx = [slice(None)] * phase.ndim
else_idx[axis] = slice(1, None)
zero_idx = tuple(zero_idx)
else_idx = tuple(else_idx)
dphase[zero_idx] = phase[zero_idx]
dphase[else_idx] = np.diff(np.unwrap(phase, axis=axis), axis=axis)
return dphase |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def layers(self):
'''Construct Keras input layers for the given transformer
Returns
-------
layers : {field: keras.layers.Input}
A dictionary of keras input layers, keyed by the corresponding
field keys.
'''
from keras.layers import Input
L = dict()
for key in self.fields:
L[key] = Input(name=key,
shape=self.fields[key].shape,
dtype=self.fields[key].dtype)
return L |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def n_frames(self, duration):
'''Get the number of frames for a given duration
Parameters
----------
duration : number >= 0
The duration, in seconds
Returns
-------
n_frames : int >= 0
The number of frames at this extractor's sampling rate and
hop length
'''
return int(time_to_frames(duration, sr=self.sr,
hop_length=self.hop_length)) |
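A quick sanity check of the frame count, assuming sr=22050 and hop_length=512:

from librosa import time_to_frames

int(time_to_frames(5.0, sr=22050, hop_length=512))    # floor(5 * 22050 / 512) -> 215 frames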
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tag(message):
# type: (str) -> None """ Tag the current commit with the current version. """ |
release_ver = versioning.current()
message = message or 'v{} release'.format(release_ver)
with conf.within_proj_dir():
log.info("Creating release tag")
git.tag(
author=git.latest_commit().author,
name='v{}'.format(release_ver),
message=message,
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lint(exclude, skip_untracked, commit_only):
# type: (List[str], bool, bool) -> None """ Lint python files. Args: exclude (list[str]):
A list of glob string patterns to test against. If the file/path matches any of those patterns, it will be filtered out. skip_untracked (bool):
If set to **True** it will skip all files not tracked by git. commit_only (bool):
Only lint files that are staged for commit. """ |
exclude = list(exclude) + conf.get('lint.exclude', [])
runner = LintRunner(exclude, skip_untracked, commit_only)
if not runner.run():
exit(1) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tool(name):
# type: (str) -> FunctionType """ Decorator for defining lint tools. Args: name (str):
The name of the tool. This name will be used to identify the tool in `pelconf.yaml`. """ |
global g_tools
def decorator(fn): # pylint: disable=missing-docstring
# type: (FunctionType) -> FunctionType
g_tools[name] = fn
return fn
return decorator |
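A hedged usage sketch: registering a hypothetical check under a name that `pelconf.yaml` can then refer to. The `tool` decorator above is assumed to be in scope, and the body is illustrative only.

@tool('mycheck')
def mycheck_run(files):
    # Return a shell-style exit code: 0 means all files passed.
    return 0

# g_tools['mycheck'] now points at mycheck_run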
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pep8_check(files):
# type: (List[str]) -> int """ Run code checks using pep8. Args: files (list[str]):
A list of files to check Returns: bool: **True** if all files passed the checks, **False** otherwise. pep8 tool is **very** fast. Especially compared to pylint: the bigger the code base, the bigger the difference. If you want to reduce check times you might disable all pep8 checks in pylint and use pep8 for that. This way you use pylint only for the more advanced checks (the number of checks enabled in pylint will make a visible difference in its run times). """ |
files = fs.wrap_paths(files)
cfg_path = conf.get_path('lint.pep8_cfg', 'ops/tools/pep8.ini')
pep8_cmd = 'pep8 --config {} {}'.format(cfg_path, files)
return shell.run(pep8_cmd, exit_on_error=False).return_code |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pylint_check(files):
# type: (List[str]) -> int """ Run code checks using pylint. Args: files (list[str]):
A list of files to check Returns: bool: **True** if all files passed the checks, **False** otherwise. """ |
files = fs.wrap_paths(files)
cfg_path = conf.get_path('lint.pylint_cfg', 'ops/tools/pylint.ini')
pylint_cmd = 'pylint --rcfile {} {}'.format(cfg_path, files)
return shell.run(pylint_cmd, exit_on_error=False).return_code |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
# type: () -> bool """ Run all linters and report results. Returns: bool: **True** if all checks were successful, **False** otherwise. """ |
with util.timed_block() as t:
files = self._collect_files()
log.info("Collected <33>{} <32>files in <33>{}s".format(
len(files), t.elapsed_s
))
if self.verbose:
for p in files:
log.info(" <0>{}", p)
# No files to lint - return success if empty runs are allowed.
if not files:
return self.allow_empty
with util.timed_block() as t:
results = self._run_checks(files)
log.info("Code checked in <33>{}s", t.elapsed_s)
success = True
for name, retcodes in results.items():
if any(x != 0 for x in retcodes):
success = False
log.err("<35>{} <31>failed with: <33>{}".format(
name, retcodes
))
return success |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_base_url(self, platform: str = "prod"):
"""Set Isogeo base URLs according to platform. :param str platform: platform to use. Options: * prod [DEFAULT] * qa * int """ |
platform = platform.lower()
self.platform = platform
if platform == "prod":
ssl = True
logging.debug("Using production platform.")
elif platform == "qa":
ssl = False
logging.debug("Using Quality Assurance platform (reduced perfs).")
else:
logging.error(
"Platform must be one of: {}".format(" | ".join(self.API_URLS.keys()))
)
raise ValueError(
3,
"Platform must be one of: {}".format(" | ".join(self.API_URLS.keys())),
)
# method ending
return (
platform.lower(),
self.API_URLS.get(platform),
self.APP_URLS.get(platform),
self.CSW_URLS.get(platform),
self.MNG_URLS.get(platform),
self.OC_URLS.get(platform),
ssl,
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_uuid(self, in_uuid: str = str, mode: bool = 0):
"""Convert a metadata UUID to its URI equivalent. And conversely. :param str in_uuid: UUID or URI to convert :param int mode: conversion direction. Options: * 0 to HEX * 1 to URN (RFC4122) * 2 to URN (Isogeo specific style) """ |
# parameters check
if not isinstance(in_uuid, str):
raise TypeError("'in_uuid' expected a str value.")
else:
pass
if not checker.check_is_uuid(in_uuid):
raise ValueError("{} is not a correct UUID".format(in_uuid))
else:
pass
if not isinstance(mode, int):
raise TypeError("'mode' expects an integer value")
else:
pass
# handle Isogeo specific UUID in XML exports
if "isogeo:metadata" in in_uuid:
in_uuid = "urn:uuid:{}".format(in_uuid.split(":")[-1])
logging.debug("Isogeo UUID URN spotted: {}".format(in_uuid))
else:
pass
# operate
if mode == 0:
return uuid.UUID(in_uuid).hex
elif mode == 1:
return uuid.UUID(in_uuid).urn
elif mode == 2:
urn = uuid.UUID(in_uuid).urn
return "urn:isogeo:metadata:uuid:{}".format(urn.split(":")[2])
else:
raise ValueError("'mode' must be one of: 0 | 1 | 2") |
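A standalone sketch of the three conversion modes using only the uuid module; the UUID value is an arbitrary example.

import uuid

u = uuid.UUID('0e1b1d2a-4f3c-4f2a-9a7e-2f1f0c3b4d5e')
u.hex                                                        # mode 0: '0e1b1d2a4f3c4f2a9a7e2f1f0c3b4d5e'
u.urn                                                        # mode 1: 'urn:uuid:0e1b1d2a-...'
'urn:isogeo:metadata:uuid:{}'.format(u.urn.split(':')[2])    # mode 2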
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def encoded_words_to_text(self, in_encoded_words: str):
"""Pull out the character set, encoding, and encoded text from the input encoded words. Next, it decodes the encoded words into a byte string, using either the quopri module or base64 module as determined by the encoding. Finally, it decodes the byte string using the character set and returns the result. See: - https://github.com/isogeo/isogeo-api-py-minsdk/issues/32 - https://dmorgan.info/posts/encoded-word-syntax/ :param str in_encoded_words: base64 or quopri encoded character string. """ |
# handle RFC2047 quoting
if '"' in in_encoded_words:
in_encoded_words = in_encoded_words.strip('"')
# regex
encoded_word_regex = r"=\?{1}(.+)\?{1}([B|Q])\?{1}(.+)\?{1}="
# pull out
try:
charset, encoding, encoded_text = re.match(
encoded_word_regex, in_encoded_words
).groups()
except AttributeError:
logging.debug("Input text was not encoded into base64 or quopri")
return in_encoded_words
# decode depending on encoding
if encoding == "B":
byte_string = base64.b64decode(encoded_text)
elif encoding == "Q":
byte_string = quopri.decodestring(encoded_text)
return byte_string.decode(charset) |
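A standalone sketch of the base64 ('B') branch on an RFC 2047 encoded-word; the sample string is illustrative.

import base64
import re

sample = '=?utf-8?B?SXNvZ2Vv?='       # 'Isogeo', base64-encoded
pattern = r"=\?{1}(.+)\?{1}([B|Q])\?{1}(.+)\?{1}="
charset, encoding, encoded_text = re.match(pattern, sample).groups()
base64.b64decode(encoded_text).decode(charset)     # -> 'Isogeo'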
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_isogeo_version(self, component: str = "api", prot: str = "https"):
"""Get Isogeo components versions. Authentication not required. :param str component: which platform component. Options: * api [default] * db * app """ |
# which component
if component == "api":
version_url = "{}://v1.{}.isogeo.com/about".format(prot, self.api_url)
elif component == "db":
version_url = "{}://v1.{}.isogeo.com/about/database".format(
prot, self.api_url
)
elif component == "app" and self.platform == "prod":
version_url = "https://app.isogeo.com/about"
elif component == "app" and self.platform == "qa":
version_url = "https://qa-isogeo-app.azurewebsites.net/about"
else:
raise ValueError(
"Component value must be one of: " "api [default], db, app."
)
# send request
version_req = requests.get(version_url, proxies=self.proxies, verify=self.ssl)
# checking response
checker.check_api_response(version_req)
# end of method
return version_req.json().get("version") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_edit_url( self, md_id: str = None, md_type: str = None, owner_id: str = None, tab: str = "identification", ):
"""Constructs the edition URL of a metadata. :param str md_id: metadata/resource UUID :param str owner_id: owner UUID :param str tab: target tab in the web form """ |
# checks inputs
if not checker.check_is_uuid(md_id) or not checker.check_is_uuid(owner_id):
raise ValueError("One of md_id or owner_id is not a correct UUID.")
else:
pass
if checker.check_edit_tab(tab, md_type=md_type):
pass
# construct URL
return (
"{}"
"/groups/{}"
"/resources/{}"
"/{}".format(self.APP_URLS.get(self.platform), owner_id, md_id, tab)
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_view_url(self, webapp: str = "oc", **kwargs):
"""Constructs the view URL of a metadata. :param str webapp: web app destination :param dict kwargs: web app specific parameters. For example see WEBAPPS """ |
# build webapp URL depending on chosen webapp
if webapp in self.WEBAPPS:
webapp_args = self.WEBAPPS.get(webapp).get("args")
# check kwargs parameters
if set(webapp_args) <= set(kwargs):
# construct and return url
url = self.WEBAPPS.get(webapp).get("url")
return url.format(**kwargs)
else:
raise TypeError(
"'{}' webapp expects {} argument(s): {}."
" Args passed: {}".format(
webapp, len(webapp_args), webapp_args, kwargs
)
)
else:
raise ValueError(
"'{}' is not a recognized webapp among: {}."
" Try to register it.".format(webapp, self.WEBAPPS.keys())
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register_webapp(self, webapp_name: str, webapp_args: list, webapp_url: str):
"""Register a new WEBAPP to use with the view URL builder. :param str webapp_name: name of the web app to register :param list webapp_args: dynamic arguments to complete the URL. Typically 'md_id'. :param str webapp_url: URL of the web app to register with args tags to replace. Example: 'https://www.ppige-npdc.fr/portail/geocatalogue?uuid={md_id}' """ |
# check parameters
for arg in webapp_args:
if arg not in webapp_url:
raise ValueError(
"Inconsistent web app arguments and URL."
" It should contain arguments to replace"
" dynamically. Example: 'http://webapp.com"
"/isogeo?metadata={md_id}'"
)
# register
self.WEBAPPS[webapp_name] = {"args": webapp_args, "url": webapp_url} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pages_counter(self, total: int, page_size: int = 100) -> int: """Simple helper to handle pagination. Returns the number of pages for a given number of results. :param int total: count of metadata in a search request :param int page_size: count of metadata to display in each page """ |
if total <= page_size:
count_pages = 1
else:
if (total % page_size) == 0:
count_pages = total / page_size
else:
count_pages = (total / page_size) + 1
# method ending
return int(count_pages) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def share_extender(self, share: dict, results_filtered: dict):
"""Extend share model with additional information. :param dict share: share returned by API :param dict results_filtered: filtered search result """ |
# add share administration URL
creator_id = share.get("_creator").get("_tag")[6:]
share["admin_url"] = "{}/groups/{}/admin/shares/{}".format(
self.app_url, creator_id, share.get("_id")
)
# check if OpenCatalog is activated
opencat_url = "{}/s/{}/{}".format(
self.oc_url, share.get("_id"), share.get("urlToken")
)
if requests.head(opencat_url):
share["oc_url"] = opencat_url
else:
pass
# add metadata ids list
share["mds_ids"] = (i.get("_id") for i in results_filtered)
return share |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def credentials_loader(self, in_credentials: str = "client_secrets.json") -> dict: """Loads API credentials from a file, JSON or INI. :param str in_credentials: path to the credentials file. By default, look for a client_secrets.json file. """ |
accepted_extensions = (".ini", ".json")
# checks
if not path.isfile(in_credentials):
raise IOError("Credentials file doesn't exist: {}".format(in_credentials))
else:
in_credentials = path.normpath(in_credentials)
if path.splitext(in_credentials)[1] not in accepted_extensions:
raise ValueError(
"Extension of credentials file must be one of {}".format(
accepted_extensions
)
)
else:
kind = path.splitext(in_credentials)[1]
# load, check and set
if kind == ".json":
with open(in_credentials, "r") as f:
in_auth = json.loads(f.read())
# check structure
heads = ("installed", "web")
if not set(in_auth).intersection(set(heads)):
raise ValueError(
"Input JSON structure is not as expected."
" First key must be one of: {}".format(heads)
)
# set
if "web" in in_auth:
# json structure for group application
auth_settings = in_auth.get("web")
out_auth = {
"auth_mode": "group",
"client_id": auth_settings.get("client_id"),
"client_secret": auth_settings.get("client_secret"),
# if not specified, must be a former file then set classic scope
"scopes": auth_settings.get("scopes", ["resources:read"]),
"uri_auth": auth_settings.get("auth_uri"),
"uri_token": auth_settings.get("token_uri"),
"uri_base": self.get_url_base_from_url_token(
auth_settings.get("token_uri")
),
"uri_redirect": None,
}
else:
# assuming in_auth == 'installed'
auth_settings = in_auth.get("installed")
out_auth = {
"auth_mode": "user",
"client_id": auth_settings.get("client_id"),
"client_secret": auth_settings.get("client_secret"),
# if not specified, must be a former file then set classic scope
"scopes": auth_settings.get("scopes", ["resources:read"]),
"uri_auth": auth_settings.get("auth_uri"),
"uri_token": auth_settings.get("token_uri"),
"uri_base": self.get_url_base_from_url_token(
auth_settings.get("token_uri")
),
"uri_redirect": auth_settings.get("redirect_uris", None),
}
else:
# assuming file is an .ini
ini_parser = ConfigParser()
ini_parser.read(in_credentials)
# check structure
if "auth" in ini_parser._sections:
auth_settings = ini_parser["auth"]
else:
raise ValueError(
"Input INI structure is not as expected."
" Section of credentials must be named: auth"
)
# set
out_auth = {
"auth_mode": auth_settings.get("CLIENT_TYPE"),
"client_id": auth_settings.get("CLIENT_ID"),
"client_secret": auth_settings.get("CLIENT_SECRET"),
"uri_auth": auth_settings.get("URI_AUTH"),
"uri_token": auth_settings.get("URI_TOKEN"),
"uri_base": self.get_url_base_from_url_token(
auth_settings.get("URI_TOKEN")
),
"uri_redirect": auth_settings.get("URI_REDIRECT"),
}
# method ending
return out_auth |
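A hedged sketch of a minimal "group" (web) credentials file that the loader above would accept; every value is a placeholder, not a real Isogeo endpoint or secret.

import json

sample = {
    "web": {
        "client_id": "my-client-id",                         # placeholder
        "client_secret": "my-client-secret",                 # placeholder
        "auth_uri": "https://example.com/oauth/authorize",   # placeholder
        "token_uri": "https://example.com/oauth/token",      # placeholder
    }
}
with open("client_secrets.json", "w") as f:
    json.dump(sample, f, indent=2)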
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def configure(username, password):
# type: (str, str) -> None """ Generate .pypirc config with the given credentials. Example: $ peltak pypi configure my_pypi_user my_pypi_pass """ |
from peltak.extra.pypi import logic
logic.gen_pypirc(username, password) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tr(self, subdomain: str, string_to_translate: str = "") -> str: """Returns translation of string passed. :param str subdomain: subpart of strings dictionary. Must be one of self.translations.keys() i.e. 'restrictions' :param str string_to_translate: string you want to translate """ |
if subdomain not in self.translations.keys():
raise ValueError(
"'{}' is not a correct subdomain."
" Must be one of {}".format(subdomain, self.translations.keys())
)
else:
pass
# translate
str_translated = self.translations.get(
subdomain, {"error": "Subdomain not found: {}".format(subdomain)}
).get(string_to_translate, "String not found")
# end of method
return str_translated |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def optout_saved(sender, instance, **kwargs):
""" This is a duplicate of the DRF view code, to stop future internal Django implementations from breaking. """ |
if instance.identity is None:
# look up using the address_type and address
identities = Identity.objects.filter_by_addr(
instance.address_type, instance.address
)
if identities.count() == 1:
instance.identity = identities[0] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_ec2_client(region_name=None, aws_access_key_id=None, aws_secret_access_key=None):
"""Gets an EC2 client :return: boto3.client object :raises: AWSAPIError """ |
log = logging.getLogger(mod_logger + '.get_ec2_client')
# Connect to EC2 API
try:
client = boto3.client('ec2', region_name=region_name, aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key)
except ClientError:
_, ex, trace = sys.exc_info()
msg = 'There was a problem connecting to EC2, please check AWS CLI and boto configuration, ensure ' \
'credentials and region are set appropriately.\n{e}'.format(e=str(ex))
log.error(msg)
raise AWSAPIError, msg, trace
else:
log.debug('Successfully created an EC2 client')
return client |
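A hedged usage sketch: the region and credentials normally come from the environment or the AWS config files, so the explicit arguments are optional.

client = get_ec2_client(region_name='us-east-1')
response = client.describe_instances(DryRun=False)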
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_eni_id(self, interface=1):
"""Given an interface number, gets the AWS elastic network interface associated with the interface. :param interface: Integer associated to the interface/device number :return: String Elastic Network Interface ID or None if not found :raises OSError, AWSAPIError, EC2UtilError """ |
log = logging.getLogger(self.cls_logger + '.get_eni_id')
# Get the instance-id
if self.instance_id is None:
msg = 'Instance ID not found for this machine'
log.error(msg)
raise OSError(msg)
log.info('Found instance ID: {i}'.format(i=self.instance_id))
log.debug('Querying EC2 instances...')
try:
response = self.client.describe_instances(
DryRun=False,
InstanceIds=[self.instance_id]
)
except ClientError:
_, ex, trace = sys.exc_info()
msg = 'Unable to query EC2 for instances\n{e}'.format(e=str(ex))
log.error(msg)
raise AWSAPIError, msg, trace
log.debug('Found instance info: {r}'.format(r=response))
# Find the ENI ID
log.info('Looking for the ENI ID to alias...')
eni_id = None
try:
for reservation in response['Reservations']:
for instance in reservation['Instances']:
if instance['InstanceId'] == self.instance_id:
for network_interface in instance['NetworkInterfaces']:
if network_interface['Attachment']['DeviceIndex'] == interface:
eni_id = network_interface['NetworkInterfaceId']
except KeyError:
_, ex, trace = sys.exc_info()
msg = 'ENI ID not found in AWS response for interface: {i}'.format(i=interface)
log.error(msg)
raise EC2UtilError, msg, trace
log.info('Found ENI ID: {e}'.format(e=eni_id))
return eni_id |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_secondary_ip(self, ip_address, interface=1):
"""Adds an IP address as a secondary IP address :param ip_address: String IP address to add as a secondary IP :param interface: Integer associated to the interface/device number :return: None :raises: AWSAPIError, EC2UtilError """ |
log = logging.getLogger(self.cls_logger + '.add_secondary_ip')
# Get the ENI ID
eni_id = self.get_eni_id(interface)
# Verify the ENI ID was found
if eni_id is None:
msg = 'Unable to find the corresponding ENI ID for interface: {i}'. \
format(i=interface)
log.error(msg)
raise EC2UtilError(msg)
else:
log.info('Found ENI ID: {e}'.format(e=eni_id))
# Assign the secondary IP address
log.info('Attempting to assign the secondary IP address...')
try:
self.client.assign_private_ip_addresses(
NetworkInterfaceId=eni_id,
PrivateIpAddresses=[
ip_address,
],
AllowReassignment=True
)
except ClientError:
_, ex, trace = sys.exc_info()
msg = 'Unable to assign secondary IP address\n{e}'.format(e=str(ex))
log.error(msg)
raise AWSAPIError, msg, trace
log.info('Successfully added secondary IP address {s} to ENI ID {e} on interface {i}'.format(
s=ip_address, e=eni_id, i=interface)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def associate_elastic_ip(self, allocation_id, interface=1, private_ip=None):
"""Given an elastic IP address and an interface number, associates the elastic IP to the interface number on this host. :param allocation_id: String ID for the elastic IP :param interface: Integer associated to the interface/device number :param private_ip: String IP address of the private IP address to assign :return: None :raises: OSError, AWSAPIError, EC2UtilError """ |
log = logging.getLogger(self.cls_logger + '.associate_elastic_ip')
if private_ip is None:
log.info('No private IP address provided, getting the primary IP '
'address on interface {i}...'.format(i=interface))
private_ip = get_ip_addresses()['eth{i}'.format(i=interface)]
log.info('Associating Elastic IP {e} on interface {i} on IP {p}'.format(
e=allocation_id, i=interface, p=private_ip))
# Get the ENI ID
log.info('Getting the ENI ID for interface: {i}'.format(i=interface))
eni_id = self.get_eni_id(interface)
# Verify the ENI ID was found
if eni_id is None:
msg = 'Unable to find the corresponding ENI ID for interface: {i}'. \
format(i=interface)
log.error(msg)
raise OSError(msg)
else:
log.info('Found ENI ID: {e}'.format(e=eni_id))
# Assign the secondary IP address
log.info('Attempting to assign the secondary IP address...')
try:
response = self.client.associate_address(
NetworkInterfaceId=eni_id,
AllowReassociation=True,
AllocationId=allocation_id,
PrivateIpAddress=private_ip
)
except ClientError:
_, ex, trace = sys.exc_info()
msg = 'Unable to attach elastic IP address {a} to interface {i}\n{e}'.format(
a=allocation_id, i=interface, e=str(ex))
log.error(msg)
raise AWSAPIError, msg, trace
code = response['ResponseMetadata']['HTTPStatusCode']
if code != 200:
msg = 'associate_address returned invalid code: {c}'.format(c=code)
log.error(msg)
raise AWSAPIError(msg)
log.info('Successfully associated elastic IP address ID {a} to interface {i} on ENI ID {e}'.format(
a=allocation_id, i=interface, e=eni_id)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def allocate_elastic_ip(self):
"""Allocates an elastic IP address :return: Dict with allocation ID and Public IP that were created :raises: AWSAPIError, EC2UtilError """ |
log = logging.getLogger(self.cls_logger + '.allocate_elastic_ip')
# Attempt to allocate a new elastic IP
log.info('Attempting to allocate an elastic IP...')
try:
response = self.client.allocate_address(
DryRun=False,
Domain='vpc'
)
except ClientError:
_, ex, trace = sys.exc_info()
msg = 'Unable to allocate a new elastic IP address\n{e}'.format(e=str(ex))
log.error(msg)
raise AWSAPIError, msg, trace
allocation_id = response['AllocationId']
public_ip = response['PublicIp']
log.info('Allocated Elastic IP with ID {a} and Public IP address {p}'.
format(a=allocation_id, p=public_ip))
# Verify the Address was allocated successfully
log.info('Verifying the elastic IP address was allocated and is available '
'for use...')
ready = False
verification_timer = [2]*60 + [5]*60 + [10]*18
num_checks = len(verification_timer)
for i in range(0, num_checks):
wait_time = verification_timer[i]
try:
self.client.describe_addresses(
DryRun=False,
AllocationIds=[allocation_id]
)
except ClientError:
_, ex, trace = sys.exc_info()
log.info('Elastic IP address {p} with Allocation ID {a} is not available for use, trying again in '
'{w} sec...\n{e}'.format(p=public_ip, a=allocation_id, w=wait_time, e=str(ex)))
time.sleep(wait_time)
else:
log.info('Elastic IP {p} with Allocation ID {a} is available for use'.format(
p=public_ip, a=allocation_id))
ready = True
break
if ready:
return {'AllocationId': allocation_id, 'PublicIp': public_ip}
else:
msg = 'Unable to verify existence of new Elastic IP {p} with Allocation ID: {a}'. \
format(p=public_ip, a=allocation_id)
log.error(msg)
raise EC2UtilError(msg) |
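A hedged sketch of the allocate-then-associate flow these methods support (the instance name and interface number are assumptions):

ec2 = EC2Util()
eip = ec2.allocate_elastic_ip()                              # {'AllocationId': ..., 'PublicIp': ...}
ec2.associate_elastic_ip(eip['AllocationId'], interface=1)   # attach the new Elastic IP to device index 1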
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_elastic_ips(self):
"""Returns the elastic IP info for this instance any are attached :return: (dict) Info about the Elastic IPs :raises AWSAPIError """ |
log = logging.getLogger(self.cls_logger + '.get_elastic_ips')
instance_id = get_instance_id()
if instance_id is None:
log.error('Unable to get the Instance ID for this machine')
return
log.info('Found Instance ID: {i}'.format(i=instance_id))
log.info('Querying AWS for info about instance ID {i}...'.format(i=instance_id))
try:
instance_info = self.client.describe_instances(DryRun=False, InstanceIds=[instance_id])
except ClientError:
_, ex, trace = sys.exc_info()
msg = 'Unable to query AWS to get info for instance {i}\n{e}'.format(i=instance_id, e=ex)
log.error(msg)
raise AWSAPIError, msg, trace
# Get the list of Public/Elastic IPs for this instance
public_ips = []
for network_interface in instance_info['Reservations'][0]['Instances'][0]['NetworkInterfaces']:
network_interface_id = network_interface['NetworkInterfaceId']
log.info('Checking ENI: {n}...'.format(n=network_interface_id))
try:
public_ips.append(network_interface['Association']['PublicIp'])
except KeyError:
log.info('No Public IP found for Network Interface ID: {n}'.format(n=network_interface_id))
else:
log.info('Found public IP for Network Interface ID {n}: {p}'.format(
n=network_interface_id, p=network_interface['Association']['PublicIp']))
# Return if no Public/Elastic IPs found
if len(public_ips) == 0:
log.info('No Elastic IPs found for this instance: {i}'.format(i=instance_id))
return
else:
log.info('Found Public IPs: {p}'.format(p=public_ips))
# Get info for each Public/Elastic IP
try:
address_info = self.client.describe_addresses(DryRun=False, PublicIps=public_ips)
except ClientError:
_, ex, trace = sys.exc_info()
msg = 'Unable to query AWS to get info for addresses {p}\n{e}'.format(p=public_ips, e=str(ex))
log.error(msg)
raise AWSAPIError, msg, trace
if not address_info:
msg = 'No address info returned for Public IPs: {p}'.format(p=public_ips)
log.error(msg)
raise AWSAPIError(msg)
return address_info |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def disassociate_elastic_ips(self):
"""For each attached Elastic IP, disassociate it :return: None :raises AWSAPIError """ |
log = logging.getLogger(self.cls_logger + '.disassociate_elastic_ips')
try:
address_info = self.get_elastic_ips()
except AWSAPIError:
_, ex, trace = sys.exc_info()
msg = 'Unable to determine Elastic IPs on this EC2 instance'
log.error(msg)
raise AWSAPIError, msg, trace
# Return if no elastic IPs were found
if not address_info:
log.info('No elastic IPs found to disassociate')
return
# Disassociate each Elastic IP
for address in address_info['Addresses']:
association_id = address['AssociationId']
public_ip = address['PublicIp']
log.info('Attempting to disassociate address {p} from Association ID: {a}'.format(
p=public_ip, a=association_id))
try:
self.client.disassociate_address(PublicIp=public_ip, AssociationId=association_id)
except ClientError:
_, ex, trace = sys.exc_info()
msg = 'There was a problem disassociating Public IP {p} from Association ID {a}'.format(
p=public_ip, a=association_id)
log.error(msg)
raise AWSAPIError, msg, trace
else:
log.info('Successfully disassociated Public IP: {p}'.format(p=public_ip)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_security_groups_in_vpc(self, vpc_id=None):
"""Lists security groups in the VPC. If vpc_id is not provided, use self.vpc_id :param vpc_id: (str) VPC ID to list security groups for :return: (list) Security Group info :raises: AWSAPIError, EC2UtilError """ |
log = logging.getLogger(self.cls_logger + '.list_security_groups_in_vpc')
if vpc_id is None:
vpc_id = self.vpc_id
if vpc_id is None:
msg = 'Unable to determine the VPC ID to use to list security groups'
log.error(msg)
raise EC2UtilError(msg)
# Create a filter on the VPC ID
filters = [
{
'Name': 'vpc-id',
'Values': [vpc_id]
}
]
# Get a list of security groups in the VPC
log.info('Querying for a list of security groups in VPC ID: {v}'.format(v=vpc_id))
try:
security_groups = self.client.describe_security_groups(DryRun=False, Filters=filters)
except ClientError:
_, ex, trace = sys.exc_info()
msg = 'Unable to query AWS for a list of security groups in VPC ID: {v}\n{e}'.format(
v=vpc_id, e=str(ex))
raise AWSAPIError, msg, trace
return security_groups |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def revoke_security_group_ingress(self, security_group_id, ingress_rules):
"""Revokes all ingress rules for a security group bu ID :param security_group_id: (str) Security Group ID :param port: (str) TCP Port number :param ingress_rules: (list) List of IP permissions (see AWS API docs re: IpPermissions) :return: None :raises: AWSAPIError, EC2UtilError """ |
log = logging.getLogger(self.cls_logger + '.revoke_security_group_ingress')
log.info('Revoking ingress rules from security group: {g}'.format(g=security_group_id))
try:
self.client.revoke_security_group_ingress(
DryRun=False,
GroupId=security_group_id,
IpPermissions=ingress_rules)
except ClientError:
_, ex, trace = sys.exc_info()
msg = 'Unable to revoke ingress rules from Security Group: {g}\n{e}'.format(
g=security_group_id, e=str(ex))
raise AWSAPIError, msg, trace |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def launch_instance(self, ami_id, key_name, subnet_id, security_group_id=None, security_group_list=None, user_data_script_path=None, instance_type='t2.small', root_device_name='/dev/xvda'):
"""Launches an EC2 instance with the specified parameters, intended to launch an instance for creation of a CONS3RT template. :param ami_id: (str) ID of the AMI to launch from :param key_name: (str) Name of the key-pair to use :param subnet_id: (str) IF of the VPC subnet to attach the instance to :param security_group_id: (str) ID of the security group, of not provided the default will be applied appended to security_group_list if provided :param security_group_id_list: (list) of IDs of the security group, if not provided the default will be applied :param user_data_script_path: (str) Path to the user-data script to run :param instance_type: (str) Instance Type (e.g. t2.micro) :param root_device_name: (str) The device name for the root volume :return: """ |
log = logging.getLogger(self.cls_logger + '.launch_instance')
log.info('Launching with AMI ID: {a}'.format(a=ami_id))
log.info('Launching with Key Pair: {k}'.format(k=key_name))
if security_group_list:
if not isinstance(security_group_list, list):
raise EC2UtilError('security_group_list must be a list')
if security_group_id and security_group_list:
security_group_list.append(security_group_id)
elif security_group_id and not security_group_list:
security_group_list = [security_group_id]
log.info('Launching with security group list: {s}'.format(s=security_group_list))
user_data = None
if user_data_script_path is not None:
if os.path.isfile(user_data_script_path):
with open(user_data_script_path, 'r') as f:
user_data = f.read()
monitoring = {'Enabled': False}
block_device_mappings = [
{
'DeviceName': root_device_name,
'Ebs': {
'VolumeSize': 100,
'DeleteOnTermination': True
}
}
]
log.info('Attempting to launch the EC2 instance now...')
try:
response = self.client.run_instances(
DryRun=False,
ImageId=ami_id,
MinCount=1,
MaxCount=1,
KeyName=key_name,
SecurityGroupIds=security_group_list,
UserData=user_data,
InstanceType=instance_type,
Monitoring=monitoring,
SubnetId=subnet_id,
InstanceInitiatedShutdownBehavior='stop',
BlockDeviceMappings=block_device_mappings
)
except ClientError:
_, ex, trace = sys.exc_info()
msg = '{n}: There was a problem launching the EC2 instance\n{e}'.format(n=ex.__class__.__name__, e=str(ex))
raise EC2UtilError, msg, trace
instance_id = response['Instances'][0]['InstanceId']
output = {
'InstanceId': instance_id,
'InstanceInfo': response['Instances'][0]
}
return output |
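A usage sketch for the method above; all resource IDs are placeholders and the instance name is an assumption:

ec2 = EC2Util()
result = ec2.launch_instance(
ami_id='ami-0123456789abcdef0',
key_name='my-keypair',
subnet_id='subnet-0123456789abcdef0',
security_group_id='sg-0123456789abcdef0',
instance_type='t2.small'
)
print('Launched instance: {i}'.format(i=result['InstanceId']))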
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_ec2_instances(self):
"""Describes the EC2 instances :return: dict containing EC2 instance data :raises: EC2UtilError """ |
log = logging.getLogger(self.cls_logger + '.get_ec2_instances')
log.info('Describing EC2 instances...')
try:
response = self.client.describe_instances()
except ClientError:
_, ex, trace = sys.exc_info()
msg = '{n}: There was a problem describing EC2 instances\n{e}'.format(n=ex.__class__.__name__, e=str(ex))
raise EC2UtilError, msg, trace
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def docs_cli(ctx, recreate, gen_index, run_doctests):
# type: (click.Context, bool, bool, bool) -> None """ Build project documentation. This command will run sphinx-refdoc first to generate the reference documentation for the code base. Then it will run sphinx to generate the final docs. You can configure the directory that stores the docs source (index.rst, conf.py, etc.) using the DOC_SRC_PATH conf variable. In case you need it, the sphinx build directory is located in ``BUILD_DIR/docs``. The reference documentation will be generated for all directories listed under the 'REFDOC_PATHS' conf variable. By default it is empty so no reference docs are generated. Sample Config:: \b build_dir: '.build' docs: path: 'docs' reference: - 'src/mypkg' Examples:: \b $ peltak docs # Generate docs for the project $ peltak docs --no-index # Skip main reference index $ peltak docs --recreate --no-index # Build docs from clean slate """ |
if ctx.invoked_subcommand:
return
from peltak.logic import docs
docs.docs(recreate, gen_index, run_doctests) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def as_string(value):
"""Convert a value to a Unicode object for matching with a query. None becomes the empty string. Bytestrings are silently decoded. """ |
if six.PY2:
buffer_types = buffer, memoryview # noqa: F821
else:
buffer_types = memoryview
if value is None:
return u''
elif isinstance(value, buffer_types):
return bytes(value).decode('utf8', 'ignore')
elif isinstance(value, bytes):
return value.decode('utf8', 'ignore')
else:
return six.text_type(value) |
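Expected behaviour of as_string, assuming UTF-8 encoded bytestrings:

as_string(None)            # u''
as_string(b'caf\xc3\xa9')  # u'café'
as_string(42)              # u'42'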
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def displayable_path(path, separator=u'; '):
"""Attempts to decode a bytestring path to a unicode object for the purpose of displaying it to the user. If the `path` argument is a list or a tuple, the elements are joined with `separator`. """ |
if isinstance(path, (list, tuple)):
return separator.join(displayable_path(p) for p in path)
elif isinstance(path, six.text_type):
return path
elif not isinstance(path, bytes):
# A non-string object: just get its unicode representation.
return six.text_type(path)
try:
return path.decode(_fsencoding(), 'ignore')
except (UnicodeError, LookupError):
return path.decode('utf8', 'ignore') |
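Expected behaviour of displayable_path, assuming a UTF-8 filesystem encoding:

displayable_path(b'/music/caf\xc3\xa9.mp3')   # u'/music/café.mp3'
displayable_path([b'/a.mp3', b'/b.mp3'])      # u'/a.mp3; /b.mp3'
displayable_path(42)                          # u'42'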
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(version):
# type: (str) -> None """ Write the given version to the VERSION_FILE """ |
if not is_valid(version):
raise ValueError("Invalid version: ".format(version))
storage = get_version_storage()
storage.write(version) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_version_storage():
# type: () -> VersionStorage """ Get version storage for the given version file. The storage engine used depends on the extension of the *version_file*. """ |
version_file = conf.get_path('version_file', 'VERSION')
if version_file.endswith('.py'):
return PyVersionStorage(version_file)
elif version_file.endswith('package.json'):
return NodeVersionStorage(version_file)
else:
return RawVersionStorage(version_file) |
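How the storage class is selected in practice; the version_file values below are illustrative only:

# version_file: 'src/mypkg/__init__.py'  -> PyVersionStorage (substitutes the __version__ assignment)
# version_file: 'package.json'           -> NodeVersionStorage
# version_file: 'VERSION'                -> RawVersionStorage (plain text file holding the version)
storage = get_version_storage()
storage.write('1.2.3')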
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(self, version):
# type: (str) -> None """ Write the project version to .py file. This will regex search in the file for a ``__version__ = VERSION_STRING`` and substitute the version string for the new version. """ |
with open(self.version_file) as fp:
content = fp.read()
ver_statement = "__version__ = '{}'".format(version)
new_content = RE_PY_VERSION.sub(ver_statement, content)
fs.write_file(self.version_file, new_content) |
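RE_PY_VERSION is defined elsewhere in this module; a plausible shape for it (an assumption, not the actual definition) would be:

import re
# Assumed pattern: matches a single-quoted __version__ assignment for substitution
RE_PY_VERSION = re.compile(r"__version__\s*=\s*'[^']*'")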
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def loggray(x, a, b):
"""Auxiliary function that specifies the logarithmic gray scale. a and b are the cutoffs.""" |
linval = 10.0 + 990.0 * (x-float(a))/(b-a)
return (np.log10(linval)-1.0)*0.5 * 255.0 |
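A worked example of the mapping: the cutoffs land on the ends of the 8-bit range, and midtones are pushed upward by the logarithm:

loggray(100.0, 100.0, 1000.0)    # linval = 10   -> (log10(10) - 1) * 0.5 * 255   = 0.0
loggray(1000.0, 100.0, 1000.0)   # linval = 1000 -> (log10(1000) - 1) * 0.5 * 255 = 255.0
loggray(550.0, 100.0, 1000.0)    # linval = 505  -> roughly 217, well above the linear midpoint of 127.5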
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rebin(self, factor):
""" I robustly rebin your image by a given factor. You simply specify a factor, and I will eventually take care of a crop to bring the image to interger-multiple-of-your-factor dimensions. Note that if you crop your image before, you must directly crop to compatible dimensions ! We update the binfactor, this allows you to draw on the image later, still using the orignial pixel coordinates. Here we work on the numpy array. """ |
if self.pilimage != None:
raise RuntimeError, "Cannot rebin anymore, PIL image already exists !"
if type(factor) != type(0):
raise RuntimeError, "Rebin factor must be an integer !"
if factor < 1:
return
origshape = np.asarray(self.numpyarray.shape)
neededshape = origshape - (origshape % factor)
if not (origshape == neededshape).all():
if self.verbose :
print "Rebinning %ix%i : I have to crop from %s to %s" % (factor, factor, origshape, neededshape)
self.crop(0, neededshape[0], 0, neededshape[1])
else:
if self.verbose :
print "Rebinning %ix%i : I do not need to crop" % (factor, factor)
self.numpyarray = rebin(self.numpyarray, neededshape/factor) # we call the rebin function defined below
# The integer division neededshape/factor is ok, we checked for this above.
self.binfactor = int(self.binfactor * factor) |
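The module-level rebin helper called above is not shown here; a typical block-averaging implementation (an assumption about its behaviour, not the library's actual code) looks like:

import numpy as np

def rebin(a, newshape):
    # Block-average a 2D array down to newshape; assumes the shape divides evenly,
    # which the cropping performed in the method above guarantees.
    shape = (newshape[0], a.shape[0] // newshape[0],
             newshape[1], a.shape[1] // newshape[1])
    return a.reshape(shape).mean(axis=(1, 3))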
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def makedraw(self):
"""Auxiliary method to make a draw object if not yet done. This is also called by changecolourmode, when we go from L to RGB, to get a new draw object. """ |
if self.draw == None:
self.draw = imdw.Draw(self.pilimage) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upsample(self, factor):
""" The inverse operation of rebin, applied on the PIL image. Do this before writing text or drawing on the image ! The coordinates will be automatically converted for you """ |
self.checkforpilimage()
if type(factor) != type(0):
raise RuntimeError, "Upsample factor must be an integer !"
if self.verbose:
print "Upsampling by a factor of %i" % factor
self.pilimage = self.pilimage.resize((self.pilimage.size[0] * factor, self.pilimage.size[1] * factor))
self.upsamplefactor = factor
self.draw = None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def drawpoint(self, x, y, colour = None):
""" Most elementary drawing, single pixel, used mainly for testing purposes. Coordinates are those of your initial image ! """ |
self.checkforpilimage()
colour = self.defaultcolour(colour)
self.changecolourmode(colour)
self.makedraw()
(pilx, pily) = self.pilcoords((x,y))
self.draw.point((pilx, pily), fill = colour) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def writetitle(self, titlestring, colour = None):
""" We write a title, centered below the image. """ |
self.checkforpilimage()
colour = self.defaultcolour(colour)
self.changecolourmode(colour)
self.makedraw()
self.loadtitlefont()
imgwidth = self.pilimage.size[0]
imgheight = self.pilimage.size[1]
textwidth = self.draw.textsize(titlestring, font = self.titlefont)[0]
textxpos = imgwidth/2.0 - textwidth/2.0
textypos = imgheight - 30
self.draw.text((textxpos, textypos), titlestring, fill = colour, font = self.titlefont)
if self.verbose :
print "I've written a title on the image." |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def writeinfo(self, linelist, colour = None):
""" We add a longer chunk of text on the upper left corner of the image. Provide linelist, a list of strings that will be written one below the other. """ |
self.checkforpilimage()
colour = self.defaultcolour(colour)
self.changecolourmode(colour)
self.makedraw()
self.loadinfofont()
for i, line in enumerate(linelist):
topspacing = 5 + (12 + 5)*i
self.draw.text((10, topspacing), line, fill = colour, font = self.infofont)
if self.verbose :
print "I've written some info on the image." |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def drawstarslist(self, dictlist, r = 10, colour = None):
""" Calls drawcircle and writelable for an list of stars. Provide a list of dictionnaries, where each dictionnary contains "name", "x", and "y". """ |
self.checkforpilimage()
colour = self.defaultcolour(colour)
self.changecolourmode(colour)
self.makedraw()
for star in dictlist:
self.drawcircle(star["x"], star["y"], r = r, colour = colour, label = star["name"])
#self.writelabel(star["x"], star["y"], star["name"], r = r, colour = colour)
if self.verbose :
print "I've drawn %i stars." % len(dictlist) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tonet(self, outfile):
""" Writes the PIL image into a png. We do not want to flip the image at this stage, as you might have written on it ! """ |
self.checkforpilimage()
if self.verbose :
print "Writing image to %s...\n%i x %i pixels, mode %s" % (outfile, self.pilimage.size[0], self.pilimage.size[1], self.pilimage.mode)
self.pilimage.save(outfile, "PNG") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cors_setup(self, request):
""" Sets up the CORS headers response based on the settings used for the API. :param request: <pyramid.request.Request> """ |
def cors_headers(request, response):
if request.method.lower() == 'options':
response.headers.update({
'-'.join([p.capitalize() for p in k.split('_')]): v
for k, v in self.cors_options.items()
})
else:
origin = self.cors_options.get('access_control_allow_origin', '*')
expose_headers = self.cors_options.get('access_control_expose_headers', '')
response.headers['Access-Control-Allow-Origin'] = origin
if expose_headers:
response.headers['Access-Control-Expose-Headers'] = expose_headers
# setup the CORS supported response
request.add_response_callback(cors_headers) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def factory(self, request, parent=None, name=None):
""" Returns a new service for the given request. :param request | <pyramid.request.Request> :return <pyramid_restful.services.AbstractService> """ |
traverse = request.matchdict['traverse']
# show documentation at the root path
if not traverse:
return {}
else:
service = {}
name = name or traverse[0]
# look for direct pattern matches
traversed = '/' + '/'.join(traverse)
service_type = None
service_object = None
for route, endpoint in self.routes:
result = route.match(traversed)
if result is not None:
request.matchdict = result
request.endpoint = endpoint
break
else:
try:
service_type, service_object = self.services[name]
except KeyError:
raise HTTPNotFound()
if service_type:
if isinstance(service_type, Endpoint):
service[name] = service_type
elif service_object is None:
service[name] = service_type(request)
else:
service[name] = service_type(request, service_object)
request.api_service = service
return service |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register(self, service, name=''):
""" Exposes a given service to this API. """ |
# expose a sub-factory
if isinstance(service, ApiFactory):
self.services[name] = (service.factory, None)
# expose a module dynamically as a service
elif inspect.ismodule(service):
name = name or service.__name__.split('.')[-1]
# exclude endpoints with patterns
for obj in vars(service).values():
endpoint = getattr(obj, 'endpoint', None)
if isinstance(endpoint, Endpoint) and endpoint.pattern:
route = Route('', endpoint.pattern)
self.routes.append((route, endpoint))
self.services[name] = (ModuleService, service)
# expose a class dynamically as a service
elif inspect.isclass(service):
name = name or service.__name__
self.services[name] = (ClassService, service)
# expose an endpoint directly
elif isinstance(getattr(service, 'endpoint', None), Endpoint):
if service.endpoint.pattern:
route = Route('', service.endpoint.pattern)
self.routes.append((route, service.endpoint))
else:
self.services[service.endpoint.name] = (service.endpoint, None)
# expose a scope
elif isinstance(service, dict):
for srv in service.values():
try:
self.register(srv)
except RuntimeError:
pass
# expose a list of services
elif isinstance(service, list):
for srv in service:
try:
self.register(srv)
except RuntimeError:
pass
# unrecognized service type
else:
raise RuntimeError('Invalid service provided: {0} ({1}).'.format(service, type(service)))
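A hedged sketch of wiring a factory into a Pyramid app (the module, class, and path names are placeholders, and ApiFactory is assumed to be constructible without arguments):

api = ApiFactory()
api.register(myproject.views.users, name='users')   # a module exposed as a service
api.register(UserService)                           # a class exposed as a service
api.serve(config, '/api/v1')                        # mount the API under /api/v1 on a Pyramid Configurator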
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serve(self, config, path, route_name=None, permission=None, **view_options):
""" Serves this API from the inputted root path """ |
route_name = route_name or path.replace('/', '.').strip('.')
path = path.strip('/') + '*traverse'
self.route_name = route_name
self.base_permission = permission
# configure the route and the path
config.add_route(route_name, path, factory=self.factory)
config.add_view(
self.handle_standard_error,
route_name=route_name,
renderer='json2',
context=StandardError
)
config.add_view(
self.handle_http_error,
route_name=route_name,
renderer='json2',
context=HTTPException
)
config.add_view(
self.process,
route_name=route_name,
renderer='json2',
**view_options
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def padd(text, padding="top", size=1):
""" Adds extra new lines to the top, bottom or both of a String @text: #str text to pad @padding: #str 'top', 'bottom' or 'all' @size: #int number of new lines -> #str padded @text .. from vital.debug import * padd("Hello world") # -> '\\nHello world' padd("Hello world", size=5, padding="all") # -> '\\n\\n\\n\\n\\nHello world\\n\\n\\n\\n\\n' .. """ |
if padding:
padding = padding.lower()
pad_all = padding == 'all'
padding_top = ""
if padding and (padding == 'top' or pad_all):
padding_top = "".join("\n" for x in range(size))
padding_bottom = ""
if padding and (padding == 'bottom' or pad_all):
padding_bottom = "".join("\n" for x in range(size))
return "{}{}{}".format(padding_top, text, padding_bottom)
return text |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def colorize(text, color="BLUE", close=True):
""" Colorizes text for terminal outputs @text: #str to colorize @color: #str color from :mod:colors @close: #bool whether or not to reset the color -> #str colorized @text .. from vital.debug import colorize colorize("Hello world", "blue") # -> '\x1b[0;34mHello world\x1b[1;m' colorize("Hello world", "blue", close=False) # -> '\x1b[0;34mHello world' .. """ |
if color:
color = getattr(colors, color.upper())
return color + uncolorize(str(text)) + (colors.RESET if close else "")
return text |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bold(text, close=True):
""" Bolds text for terminal outputs @text: #str to bold @close: #bool whether or not to reset the bold flag -> #str bolded @text .. from vital.debug import bold bold("Hello world") # -> '\x1b[1mHello world\x1b[1;m' bold("Hello world", close=False) # -> '\x1b[1mHello world' .. """ |
return getattr(colors, "BOLD") + str(text) + \
(colors.RESET if close else "") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_class_that_defined_method(meth):
""" Gets the class object which defined a given method @meth: a class method -> owner class object """ |
if inspect.ismethod(meth):
for cls in inspect.getmro(meth.__self__.__class__):
if cls.__dict__.get(meth.__name__) is meth:
return cls
meth = meth.__func__ # fallback to __qualname__ parsing
if inspect.isfunction(meth):
cls = getattr(
inspect.getmodule(meth),
meth.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0])
if isinstance(cls, type):
return cls
return None |
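A quick check of the lookup, assuming Foo is defined at module level so the __qualname__ fallback can resolve it (Python 3 shown):

class Foo(object):
    def bar(self):
        pass

get_class_that_defined_method(Foo().bar)   # <class 'Foo'> via the bound-method / __qualname__ path
get_class_that_defined_method(Foo.bar)     # <class 'Foo'> via the plain-function path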
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_obj_name(obj, delim="<>"):
""" Formats the object name in a pretty way @obj: any python object @delim: the characters to wrap a parent object name in -> #str formatted name .. from vital.debug import format_obj_name format_obj_name(vital.debug.Timer) # -> 'Timer<vital.debug>' format_obj_name(vital.debug) # -> 'debug<vital>' format_obj_name(vital.debug.Timer.time) # -> 'time<vital.debug.Timer>' .. """ |
pname = ""
parent_name = get_parent_name(obj)
if parent_name:
pname = "{}{}{}".format(delim[0], get_parent_name(obj), delim[1])
return "{}{}".format(get_obj_name(obj), pname) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def preprX(*attributes, address=True, full_name=False, pretty=False, keyless=False, **kwargs):
""" `Creates prettier object representations` @*attributes: (#str) instance attributes within the object you wish to display. Attributes can be recursive e.g. |one.two.three| for access to |self.one.two.three| @address: (#bool) |True| to include the memory address @full_name: (#bool) |True| to include the full path to the object vs. the qualified name @pretty: (#bool) |True| to allow bolding and coloring @keyless: (#bool) |True| to display the values of @attributes withotu their attribute names .. class Foo(object):
def __init__(self, bar, baz=None):
self.bar = bar self.baz = baz __repr__ = prepr('bar', 'baz', address=False) foo = Foo('foobar') repr(foo) .. |<Foo:bar=`foobar`, baz=None>| """ |
def _format(obj, attribute):
try:
if keyless:
val = getattr_in(obj, attribute)
if val is not None:
return repr(val)
else:
return '%s=%s' % (attribute,
repr(getattr_in(obj, attribute)))
except AttributeError:
return None
def prep(obj, address=address, full_name=full_name, pretty=pretty,
keyless=keyless, **kwargs):
if address:
address = ":%s" % hex(id(obj))
else:
address = ""
data = list(filter(lambda x: x is not None,
map(lambda a: _format(obj, a), attributes)))
if data:
data = ':%s' % ', '.join(data)
else:
data = ''
return stdout_encode("<%s%s%s>" % (get_obj_name(obj), data, address))
return prep |