content (string, length 22 to 815k) | id (int64, 0 to 4.91M)
---|---|
def batch_post(
api_key: str, host: Optional[str] = None, gzip: bool = False, timeout: int = 15, **kwargs
) -> requests.Response:
"""Post the `kwargs` to the batch API endpoint for events"""
res = post(api_key, host, "/batch/", gzip, timeout, **kwargs)
return _process_response(res, success_message="data uploaded successfully", return_json=False)
| 3,400 |
def log_csv(msg, level='info', path='test.csv', format=None, name='csvlog'):
"""csv으로 log 메시지를 보냄
Args:
msg (str): 메시지
name (str, optional): log 이름
path (str, optional): csv 저장 경로path
"""
csvlog = CsvLog(path=path, format=format, name=name)
if level == 'debug':
csvlog.logger.debug(msg)
elif level == 'info':
csvlog.logger.info(msg)
elif level == 'warning':
csvlog.logger.warning(msg)
elif level == 'error':
csvlog.logger.error(msg)
elif level == 'critical':
csvlog.logger.critical(msg)
else:
csvlog.logger.debug(msg)
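A minimal usage sketch (assumes the CsvLog helper used above is importable in this module; the path and logger name are placeholder values):

# Write an info entry and a warning entry to the same CSV log file.
log_csv("pipeline started", level="info", path="run_log.csv", name="pipeline")
log_csv("disk space low", level="warning", path="run_log.csv", name="pipeline")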
| 3,401 |
def _raise_for_status(response: Response) -> None:
"""Check response status, raising ClickException for errors"""
try:
response.raise_for_status()
except HTTPError as e:
raise click.ClickException("%s" % e) from e
| 3,402 |
def load_ste_data(task_name: str) -> List[pd.DataFrame]:
"""Loads the STE data corresponding to the given task name.
Args:
task_name (str): The name of the STE data file.
Returns:
List[pd.DataFrame]: The STE data if found, else empty list.
"""
# Variant-aware STE task names
ste_task_variant_names = get_ste_data_names()
# Variant-agnostic STE task names
    ste_task_base_names = {
        variant_name.split("_")[0] for variant_name in ste_task_variant_names
    }
if task_name in ste_task_variant_names:
# Load variant-aware STE data
ste_file_name = l2l.get_l2root_base_dirs("taskinfo", task_name + ".pickle")
with open(ste_file_name, "rb") as ste_file:
ste_data = pickle.load(ste_file)
return ste_data
elif task_name in ste_task_base_names:
ste_data = []
# Load variant-agnostic STE data
for ste_variant_file in l2l.get_l2root_base_dirs("taskinfo").glob(
task_name + "*.pickle"
):
with open(ste_variant_file, "rb") as ste_file:
ste_data.extend(pickle.load(ste_file))
# Remove variant label from task names
for idx, ste_data_df in enumerate(ste_data):
ste_data[idx]["task_name"] = ste_data_df["task_name"].apply(
lambda x: x.split("_")[0]
)
return ste_data
else:
return []
| 3,403 |
def bsplclib_CacheD1(*args):
"""
    * Performs the evaluation of the cache and then multiplies by the weights; this just evaluates the current point. The parameter must be normalized between 0 and 1 for the span. The Cache must be valid when calling this routine; the Geom package will ensure that. The CacheParameter is where the Cache was constructed; the SpanLength is used to normalize the polynomial in the cache to avoid bad conditioning effects.
:param U:
:type U: float
:param Degree:
:type Degree: int
:param CacheParameter:
:type CacheParameter: float
:param SpanLenght:
:type SpanLenght: float
:param Poles:
:type Poles: TColgp_Array1OfPnt
:param Weights:
:type Weights: TColStd_Array1OfReal &
:param Point:
:type Point: gp_Pnt
:param Vec:
:type Vec: gp_Vec
:rtype: void
    * Performs the evaluation of the Bspline basis and then multiplies by the weights; this just evaluates the current point. The parameter must be normalized between 0 and 1 for the span. The Cache must be valid when calling this routine; the Geom package will ensure that. The CacheParameter is where the Cache was constructed; the SpanLength is used to normalize the polynomial in the cache to avoid bad conditioning effects.
:param U:
:type U: float
:param Degree:
:type Degree: int
:param CacheParameter:
:type CacheParameter: float
:param SpanLenght:
:type SpanLenght: float
:param Poles:
:type Poles: TColgp_Array1OfPnt2d
:param Weights:
:type Weights: TColStd_Array1OfReal &
:param Point:
:type Point: gp_Pnt2d
:param Vec:
:type Vec: gp_Vec2d
:rtype: void
"""
return _BSplCLib.bsplclib_CacheD1(*args)
| 3,404 |
def fileDialog2(bbo="int",cc="string",cap="string",ds="int",ff="string",fm="int",ftc="script",hne=1,okc="string",oca="script",ocm="script",oc2="script",ocr="script",oin="script",rf=1,sff="string",sc="script",spe=1,dir="string"):
"""
http://help.autodesk.com/cloudhelp/2019/ENU/Maya-Tech-Docs/CommandsPython/fileDialog2.html
-----------------------------------------
fileDialog2 is undoable, NOT queryable, and NOT editable.
This command provides a dialog that allows users to select files or
directories.
-----------------------------------------
Return Value:
string array
-----------------------------------------
Flags:
-----------------------------------------
bbo : buttonBoxOrientation [int] []
* 1 Vertical button box layout. Cancel button is below the accept button. * 2 Horizontal button box layout. Cancel button is to the right of the accept button.
-----------------------------------------
cc : cancelCaption [string] []
If the dialogStyle flag is set to 2 then this provides a caption for the Cancel button within the dialog.
-----------------------------------------
cap : caption [string] []
Provide a title for the dialog.
-----------------------------------------
ds : dialogStyle [int] []
* 1 On Windows or Mac OS X will use a native style file dialog. * 2 Use a custom file dialog with a style that is consistent across platforms.
-----------------------------------------
ff : fileFilter [string] []
Provide a list of file type filters to the dialog. Multiple filters should be separated by double semi-colons. See the examples section.
-----------------------------------------
fm : fileMode [int] []
    Indicate what the dialog is to return. * 0 Any file, whether it exists or not. * 1 A single existing file. * 2 The name of a directory. Both directories and files are displayed in the dialog. * 3 The name of a directory. Only directories are displayed in the dialog. * 4 The names of one or more existing files.
-----------------------------------------
ftc : fileTypeChanged [script] []
MEL only. The string is interpreted as a MEL callback, to be called when the user-selected file type changes. The callback is of the form: global proc MyCustomFileTypeChanged(string $parent, string $newType) The parent argument is the parent layout into which controls have been added using the optionsUICreate flag. The newType argument is the new file type.
-----------------------------------------
hne : hideNameEdit [boolean] []
Hide name editing input field.
-----------------------------------------
okc : okCaption [string] []
If the dialogStyle flag is set to 2 then this provides a caption for the OK, or Accept, button within the dialog.
-----------------------------------------
oca : optionsUICancel [script] []
MEL only. The string is interpreted as a MEL callback, to be called when the dialog is cancelled (with Cancel button or close button to close window). The callback is of the form: global proc MyCustomOptionsUICancel()
-----------------------------------------
ocm : optionsUICommit [script] []
MEL only. The string is interpreted as a MEL callback, to be called when the dialog is successfully dismissed. It will not be called if the user cancels the dialog, or closes the window using window title bar controls or other window system means. The callback is of the form: global proc MyCustomOptionsUICommit(string $parent) The parent argument is the parent layout into which controls have been added using the optionsUICreate flag.
-----------------------------------------
oc2 : optionsUICommit2 [script] []
MEL only. As optionsUICommit, the given string is interpreted as a MEL callback, to be called when the dialog is successfully dismissed. The difference is that this callback takes one additional argument which is the file name selected by the user before the dialog validation. It will not be called if the user cancels the dialog, or closes the window using window title bar controls or other window system means. The callback is of the form: global proc MyCustomOptionsUICommit(string $parent, string $selectedFile) The parent argument is the parent layout into which controls have been added using the optionsUICreate flag.
-----------------------------------------
ocr : optionsUICreate [script] []
MEL only. The string is interpreted as a MEL callback, to be called on creation of the file dialog. The callback is of the form: global proc MyCustomOptionsUISetup(string $parent) The parent argument is the parent layout into which controls can be added. This parent is the right-hand pane of the file dialog.
-----------------------------------------
oin : optionsUIInit [script] []
MEL only. The string is interpreted as a MEL callback, to be called just after file dialog creation, to initialize controls. The callback is of the form: global proc MyCustomOptionsUIInitValues(string $parent, string $filterType) The parent argument is the parent layout into which controls have been added using the optionsUICreate flag. The filterType argument is the initial file filter.
-----------------------------------------
rf : returnFilter [boolean] []
If true, the selected filter will be returned as the last item in the string array along with the selected files.
-----------------------------------------
sff : selectFileFilter [string] []
    Specify the initial file filter to select. Specify just the beginning text and not the full wildcard spec.
-----------------------------------------
sc : selectionChanged [script] []
MEL only. The string is interpreted as a MEL callback, to be called when the user changes the file selection in the file dialog. The callback is of the form: global proc MyCustomSelectionChanged(string $parent, string $selection) The parent argument is the parent layout into which controls have been added using the optionsUICreate flag. The selection argument is the full path to the newly-selected file.
-----------------------------------------
spe : setProjectBtnEnabled [boolean] []
Define whether the project button should be enabled
-----------------------------------------
dir : startingDirectory [string]
Provide the starting directory for the dialog.
"""
| 3,405 |
def velocity_filter(freq, corr_spectrum, interstation_distance, cmin=1.0,
cmax=5.0, p=0.05):
"""
Filters a frequency-domain cross-spectrum so as to remove all signal
corresponding to a specified velocity range.
    In practice, the procedure (i) inverse-Fourier transforms the cross-spectrum
    to the time domain; (ii) it zeroes the resulting time-domain signal at lags
    corresponding to velocities outside the velocity range, applying a cosine
    taper at the edges of the retained window (the same cosine taper is applied
    at the two ends of the interval); (iii) a forward Fourier transform brings
    the tapered cross-correlation back to the frequency domain [e.g., Magrini &
    Boschi 2021].
Parameters
----------
freq : ndarray of shape (n,)
Frequency vector
    corr_spectrum : ndarray of shape (n,)
        Complex-valued frequency-domain cross-spectrum
interstation_distance : float (in km)
cmin, cmax : float (in km/s)
Velocity range. Default values are 1 and 5
p : float
        Fraction of cosine taper. Default is 0.05 (5%)
Returns
-------
corr : ndarray of shape (n,)
Filtered cross-spectrum
References
----------
Magrini & Boschi 2021, Surface‐Wave Attenuation From Seismic Ambient Noise:
Numerical Validation and Application, JGR
"""
dt = 1 / (2 * freq[-1])
idx_tmin = int((interstation_distance/cmax)/dt * (1-p/2)) # 5percent extra for taper
idx_tmax = int((interstation_distance/cmin)/dt * (1+p/2)) # 5% extra for taper
vel_filt_window = cosine_taper(idx_tmax-idx_tmin, p=p)
tcorr = np.fft.irfft(corr_spectrum)
vel_filt = np.zeros(len(tcorr))
vel_filt[idx_tmin : idx_tmax] = vel_filt_window
vel_filt[-idx_tmax+1 : -idx_tmin+1] = vel_filt_window #+1 is just for symmetry reasons
tcorr *= vel_filt
corr = np.fft.rfft(tcorr)
return corr
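A usage sketch with synthetic input (assumes numpy as np and the cosine_taper helper used above are available in this module; the random spectrum is purely illustrative):

rng = np.random.default_rng(0)
freq = np.linspace(0.0, 1.0, 513)  # equivalent to np.fft.rfftfreq(1024, d=0.5)
spectrum = rng.standard_normal(513) + 1j * rng.standard_normal(513)
filtered = velocity_filter(freq, spectrum, interstation_distance=100.0,
                           cmin=1.0, cmax=5.0, p=0.05)
print(filtered.shape)  # (513,)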
| 3,406 |
def transcribe_file(path, language):
"""
    Transcribe a PCM_16-encoded audio signal stored in a file using Google's STT API (Google Cloud Speech).
This implementation should be changed to transcribe audio-bytes directly.
:param path: path to audio file holding audio bytes
:param language: language of the text spoken in the audio signal
:return: string holding the transcription generated by Google Cloud Speech
or empty string if no transcription was found
"""
client = speech.SpeechClient()
with io.open(path, 'rb') as audio_file:
content = audio_file.read()
audio = types.RecognitionAudio(content=content)
language_code = LANGUAGE_CODES[language]
config = types.RecognitionConfig(
encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=16000,
language_code=language_code)
# Detects speech in the audio file
response = client.recognize(config, audio)
if response and response.results:
return response.results[0].alternatives[0].transcript
return ''
| 3,407 |
def make_aware(dt):
"""Appends tzinfo and assumes UTC, if datetime object has no tzinfo already."""
return dt if dt.tzinfo else dt.replace(tzinfo=timezone.utc)
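For example (a short sanity check of both branches):

from datetime import datetime, timezone

naive = datetime(2021, 5, 1, 12, 0, 0)
assert make_aware(naive).tzinfo is timezone.utc   # UTC attached to a naive datetime
aware = datetime(2021, 5, 1, 12, 0, 0, tzinfo=timezone.utc)
assert make_aware(aware) is aware                 # already-aware datetimes pass through unchanged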
| 3,408 |
def train(model, tokenizer, train_dataset, batch_size, lr, adam_epsilon,
epochs):
"""
:param model: Bert Model to train
:param tokenizer: Bert Tokenizer to train
:param train_dataset:
    :param batch_size: Stick to 1 if not using a high-end GPU
    :param lr: Suggested learning rate from the paper is 5e-5
    :param adam_epsilon: Epsilon for AdamW (weight-decay fix); the suggested
    value is 1e-8
:param epochs: Usually a single pass through the entire dataset is
satisfactory
:return: Loss
"""
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset, sampler=train_sampler, batch_size=batch_size)
    t_total = len(train_dataloader) * epochs  # Total optimization steps
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if
not any(nd in n for nd in no_decay)],
'weight_decay': 0.01},
{'params': [p for n, p in model.named_parameters() if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=lr, eps=adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, 0, t_total)
# ToDo Case for fp16
# Start of training loop
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Batch size = %d", batch_size)
model.train()
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.resize_token_embeddings(len(tokenizer))
model.zero_grad()
train_iterator = trange(int(epochs), desc="Epoch")
for _ in train_iterator:
epoch_iterator = tqdm_notebook(train_dataloader, desc="Iteration")
for batch in epoch_iterator:
inputs, labels = mask_tokens(batch, tokenizer)
inputs = inputs.to('cuda') # Don't bother if you don't have a gpu
labels = labels.to('cuda')
outputs = model(inputs, masked_lm_labels=labels)
# model outputs are always tuple in transformers (see doc)
loss = outputs[0]
loss.backward()
tr_loss += loss.item()
# if (step + 1) % 1 == 0: # 1 here is a placeholder for gradient
# accumulation steps
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
optimizer.step()
scheduler.step()
model.zero_grad()
global_step += 1
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
return model, tokenizer
| 3,409 |
def fastqcounter(infile):
"""
Returns the number of unique sequences in a fastq file
"""
#check if file is derep'd using DerepCheck()
derep = reptools.DerepCheck(infile)
n=0
if derep:
with open(infile) as fn:
for title,seq,qual in reptools.FASTQparser(fn):
n+=reptools.DerepCount(title)
else:
with open(infile) as fn:
for title,seq,qual in reptools.FASTQparser(fn):
n+=1
return(n)
| 3,410 |
def raichuMoves(board,player):
""""Generate All raichu Successors"""
piece = "@" if player == "w" else "$"
possible_boards = []
raichu_locs=[(row_i,col_i) for col_i in range(len(board[0])) for row_i in range(len(board)) if board[row_i][col_i]==piece]
for each_raichu in raichu_locs:
new_boards = raichu_move(board, player, piece, each_raichu[0], each_raichu[1])
if len(new_boards) == 0:
continue
possible_boards.extend(new_boards)
return possible_boards
| 3,411 |
def check_supported():
"""返回模块是否可用"""
return True
| 3,412 |
def handle_request_parsing_error(err):
""" This handles request parsing errors generated for example by schema
field validation failing."""
abort(HTTPStatus.BAD_REQUEST, errors=err.messages)
| 3,413 |
def tf_batch_propagate(hamiltonian, hks, signals, dt, batch_size):
"""
Propagate signal in batches
Parameters
----------
hamiltonian: tf.tensor
Drift Hamiltonian
hks: Union[tf.tensor, List[tf.tensor]]
List of control hamiltonians
signals: Union[tf.tensor, List[tf.tensor]]
List of control signals, one per control hamiltonian
dt: float
Length of one time slice
batch_size: int
Number of elements in one batch
    Returns
    -------
    tf.tensor
        Propagators for each time slice, concatenated over all batches
    """
if signals is not None:
batches = int(tf.math.ceil(signals.shape[0] / batch_size))
batch_array = tf.TensorArray(
signals.dtype, size=batches, dynamic_size=False, infer_shape=False
)
for i in range(batches):
batch_array = batch_array.write(
i, signals[i * batch_size : i * batch_size + batch_size]
)
else:
batches = int(tf.math.ceil(hamiltonian.shape[0] / batch_size))
batch_array = tf.TensorArray(
hamiltonian.dtype, size=batches, dynamic_size=False, infer_shape=False
)
for i in range(batches):
batch_array = batch_array.write(
i, hamiltonian[i * batch_size : i * batch_size + batch_size]
)
dUs_array = tf.TensorArray(tf.complex128, size=batches, infer_shape=False)
for i in range(batches):
x = batch_array.read(i)
if signals is not None:
result = tf_propagation_vectorized(hamiltonian, hks, x, dt)
else:
result = tf_propagation_vectorized(x, None, None, dt)
dUs_array = dUs_array.write(i, result)
return dUs_array.concat()
| 3,414 |
def test_filter_cancer_variants_wrong_params(app, institute_obj, case_obj):
"""test filter cancer SNV variants with filter form filled with parameters having the wrong format"""
# GIVEN an initialized app
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for("auto_login"))
# When a POST request with filter containing wrongly formatted parameters is sent
form_data = {
"control_frequency": "not a number!",
}
resp = client.post(
url_for(
"variants.cancer_variants",
institute_id=institute_obj["internal_id"],
case_name=case_obj["display_name"],
),
data=form_data,
)
# THEN it should return a redirected page
assert resp.status_code == 302
| 3,415 |
def update_emoji(payload):
"""Process an emoji update event."""
print("Got an emoji added event")
event = payload.get("event", {})
user_id = event.get("user")
ts = event.get("event_ts")
user = User(user_id)
if float(ts) < user.last_update:
return
else:
user.last_update = float(ts)
user.checkin()
| 3,416 |
def noise_dither_bayer(img:np.ndarray) -> np.ndarray:
"""Adds colored bayer dithering noise to the image.
Args:
img: Image to be dithered.
Returns:
version of the image with dithering applied.
"""
imgtype = img.dtype
size = img.shape
#Note: these are very slow for large images, must crop first before applying.
# Bayer works more or less. I think it's missing a part of the image, the
# dithering pattern is apparent, but the quantized (color palette) is not there.
# Still enough for models to learn dedithering
bayer_matrix = np.array([[0, 8, 2, 10], [12, 4, 14, 6], [3, 11, 1, 9], [15, 7, 13, 5]]) #/256 #4x4 Bayer matrix
bayer_matrix = bayer_matrix*16
red = img[:,:,2] #/255.
green = img[:,:,1] #/255.
blue = img[:,:,0] #/255.
img_split = np.zeros((img.shape[0], img.shape[1], 3), dtype = imgtype)
for values, color, channel in zip((red, green, blue), ('red', 'green', 'blue'), (2,1,0)):
for i in range(0, values.shape[0]):
for j in range(0, values.shape[1]):
x = np.mod(i, 4)
y = np.mod(j, 4)
if values[i, j] > bayer_matrix[x, y]:
img_split[i,j,channel] = 255 #1
dithered = img_split #*255.
return dithered
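A quick usage sketch on a small random image (the function indexes channels in BGR order, as with images loaded by OpenCV; the small size keeps the per-pixel loop fast):

rng = np.random.default_rng(0)
img = rng.integers(0, 256, size=(64, 64, 3), dtype=np.uint8)  # synthetic BGR image
dithered = noise_dither_bayer(img)
print(dithered.shape, dithered.dtype)   # (64, 64, 3) uint8
print(np.unique(dithered))              # each channel is quantized to 0 or 255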
| 3,417 |
def get_match_rank(track, tagged_file):
"""
    :param track: Track metadata to match against
    :param tagged_file: List of candidate tagged files
    :type track: TrackMetadata
    :return: List of combined rank scores, one per candidate file
"""
filenames = [filter_filename(os.path.splitext(os.path.basename(filename.path))[0]) for filename in tagged_file]
rank1 = [0]*len(tagged_file)
# Alphabetically closest
lowest = 100000
index = -1
values = [0]*len(tagged_file)
for filename in filenames:
value = levenshtein(track.title, filename)
values[filenames.index(filename)] = value
if value < lowest:
lowest = value
index = filenames.index(filename)
    print(index)
closest = get_close_matches(track.title, filenames)
if index != -1:
rank1[index] = 1
rank2 = [0]*len(tagged_file)
closest = min(tagged_file, key=lambda x: abs(track.get_duration_in_seconds() - x.length))
rank2[tagged_file.index(closest)] = 1
    final_ranks = [0.5*rank1[i] + 0.5*rank2[i] for i in range(len(rank1))]
return final_ranks
| 3,418 |
def execshell_withpipe_ex(cmd, b_printcmd=True):
"""
    Deprecated. Recommend using ShellExec instead.
"""
strfile = '/tmp/%s.%d.%d' % (
'shell_env.py', int(os.getpid()), random.randint(100000, 999999)
)
os.mknod(strfile)
cmd = cmd + ' 1>' + strfile + ' 2>/dev/null'
os.system(cmd)
if True == b_printcmd:
print(cmd)
fphandle = open(strfile, 'r')
lines = fphandle.readlines()
fphandle.close()
os.unlink(strfile)
return lines
| 3,419 |
def parseData(file_name, delimiter=None, header_size=0, col_types=None, ret_array=False):
""" Parse data form a text file
Arguments:
file_name: [str] Name of the input file.
Keyword arguments:
delimiter: [str] Data delimiter (often a comma of a semicolon). None by default, i.e. space/tab
delimited data
header_size: [int] Number of lines in the header of the file. 0 by defualt.
col_types: [type, or list of types] Define which columns are of which type. E.g. if all colums contain
floating point data, then you can specify:
col_types=float.
On the other hand, if the first colum
contains integer values, and second column contains floating point values, you can specify:
col_types=[int, float]
This argument is None by default, meaning that values will be left as strings.
ret_array: [bool] If True, the function returns a numpy array. If False, it returns a Pyhon list.
Be aware that if col_types are specified, and one of the types is float, the whole array will be
a float array. Furthermore, if some values in the read data are strings, the all values in the
numpy array will be strings are well.
Returns:
data_list: Python list if ret_array is False, numpy array if ret_array is True
"""
with open(file_name) as f:
# Skip header
for i in range(header_size):
next(f)
data_list = []
# Go through every line of the file
for line in f:
line = line.replace('\n', '').replace('\r', '')
# Split the line by the given delimiter
if delimiter is None:
line = line.split()
else:
line = line.split(delimiter)
# Convert the columns to given types
if col_types is not None:
if not isinstance(col_types, list):
col_types = [col_types]*len(line)
if len(line) == len(col_types):
for i, (tp, entry) in enumerate(zip(col_types, line)):
line[i] = tp(entry)
data_list.append(line)
# Convert the data to a numpy array
if ret_array:
data_list = np.array(data_list)
return data_list
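A usage sketch with a small comma-delimited file (written to a temporary location purely for illustration):

import os
import tempfile

tmp = tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False)
tmp.write("# id, value\n1,0.5\n2,1.5\n3,2.5\n")
tmp.close()

rows = parseData(tmp.name, delimiter=",", header_size=1, col_types=[int, float])
print(rows)   # [[1, 0.5], [2, 1.5], [3, 2.5]]
os.remove(tmp.name)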
| 3,420 |
def file_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> List[CommandResults]:
"""
Returns file's reputation
"""
files = argToList(args.get('file'))
since = convert_string_to_epoch_time(args.get('since'), arg_name='since')
until = convert_string_to_epoch_time(args.get('until'), arg_name='until')
limit = arg_to_number(args.get('limit'), arg_name='limit')
headers = argToList(args.get('headers'))
reliability = params.get('feedReliability')
results: List[CommandResults] = list()
for file in files:
if get_hash_type(file) not in ('sha256', 'sha1', 'md5'): # check file's validity
raise ValueError(f'Hash "{file}" is not of type SHA-256, SHA-1 or MD5')
try:
raw_response = client.file(file, since, until, limit)
except Exception as exception:
# If anything happens, handle like there are no results
err_msg = f'Could not process file: "{file}"\n {str(exception)}'
demisto.debug(err_msg)
raw_response = {}
if data := raw_response.get('data'):
score = calculate_dbot_score(reputation_data=data, params=params)
malicious_description = get_malicious_description(score, data, params)
dbot_score = Common.DBotScore(
indicator=file,
indicator_type=DBotScoreType.FILE,
integration_name=VENDOR_NAME,
score=score,
reliability=reliability,
malicious_description=malicious_description
)
if not headers:
headers = ['description', 'status', 'share_level', 'added_on', 'review_status', 'id', 'password',
'sample_size', 'sample_size_compressed', 'sample_type', 'victim_count', 'md5', 'sha1',
'sha256', 'sha3_384', 'ssdeep']
readable_output = tableToMarkdown(f'{CONTEXT_PREFIX} Result for file hash {file}', data, headers=headers)
data_entry = data[0]
file_indicator = Common.File(
dbot_score=dbot_score,
file_type=data_entry.get('sample_type'),
size=data_entry.get('sample_size'),
md5=data_entry.get('md5'),
sha1=data_entry.get('sha1'),
sha256=data_entry.get('sha256'),
ssdeep=data_entry.get('ssdeep'),
tags=data_entry.get('tags')
)
else: # no data
dbot_score = Common.DBotScore(
indicator=file,
indicator_type=DBotScoreType.FILE,
integration_name=VENDOR_NAME,
score=Common.DBotScore.NONE,
reliability=reliability
)
readable_output = f'{CONTEXT_PREFIX} does not have details about file: {file} \n'
file_indicator = Common.File(
dbot_score=dbot_score
)
result = CommandResults(
outputs_prefix=f'{CONTEXT_PREFIX}.File',
outputs_key_field='id',
outputs=data,
indicator=file_indicator,
readable_output=readable_output,
raw_response=raw_response
)
results.append(result)
return results
| 3,421 |
def run_task(client, cmd, cwd, prerequire=[], shell=False, quiet=False):
""" run cmd, in cwd
cmd should be a list (*args), if shell is False
when wildcards are used, shell should be Ture, and cmd is just a string
prerequire is a list of futures that must be gathered before the cmd can run
"""
if not quiet:
print(f"starting job {cmd} in {cwd}")
client.gather(prerequire)
return client.submit(subprocess.run, cmd,
stdout=subprocess.PIPE,stderr=subprocess.STDOUT,
shell=shell, check=True, cwd=cwd, key=create_uid())
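A usage sketch with a local Dask client (assumes the create_uid helper used above is defined in this module; the command and working directory are placeholders):

from dask.distributed import Client

client = Client(processes=False)        # lightweight local scheduler
future = run_task(client, ["ls", "-l"], cwd="/tmp")
completed = future.result()             # subprocess.CompletedProcess
print(completed.stdout.decode())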
| 3,422 |
def Kane_2D_builder(N,dis,mu,B=0,
params={},crystal='zincblende',
mesh=0,
sparse='yes'):
"""
    2D 8-band k.p Hamiltonian builder. It obtains the Hamiltonian for a 3D
    wire which is infinite in one direction, described using 8-band k.p theory.
Parameters
----------
N: int or arr
Number of sites.
dis: int or arr
Distance (in nm) between sites.
mu: float or arr
Chemical potential. If it is an array, each element is the on-site
chemical potential.
B: float
Magnetic field along the wire's direction.
params: dic or str
        Kane/Luttinger parameters of the k.p Hamiltonian. 'InAs', 'InSb',
        'GaAs' and 'GaSb' select the default parameters for these materials.
    crystal: {'zincblende','wurtzite','minimal'}
        Crystal symmetry along the nanowire growth. 'minimal' is a minimal
        model in which the intra-valence band couplings are ignored.
    mesh: mesh
        If the discretization is homogeneous, mesh=0. Otherwise, mesh
        provides a mesh with the position of the sites in the mesh.
    sparse: {"yes","no"}
        Sparsity of the built Hamiltonian. "yes" builds a sparse (CSC) matrix,
        while "no" builds a dense matrix.
Returns
-------
H: arr
Hamiltonian matrix.
"""
if (params=={} or params=='InAs') and crystal=='minimal':
gamma0, gamma1, gamma2, gamma3 = 1, 0,0,0
P, m_eff = 919.7, 1.0
EF, Ecv, Evv, Ep = 0, -417, -390, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
elif (params=={} or params=='InSb') and crystal=='minimal':
gamma0, gamma1, gamma2, gamma3 = 1, 0,0,0
P, m_eff = 940.2, 1.0
EF, Ecv, Evv, Ep = 0, -235, -810, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
elif (params=={} or params=='InAs') and (crystal=='zincblende'):
gamma0, gamma1, gamma2, gamma3 = 1, 20.4, 8.3, 9.1
P, m_eff = 919.7, 1.0
EF, Ecv, Evv, Ep = 0, -417, -390, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
elif (params=={} or params=='InSb') and (crystal=='zincblende'):
gamma0, gamma1, gamma2, gamma3 = 1, 34.8, 15.5, 16.5
P, m_eff = 940.2, 1.0
EF, Ecv, Evv, Ep = 0, -235, -810, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
elif (params=={} or params=='GaAs') and (crystal=='zincblende'):
gamma0, gamma1, gamma2, gamma3 = 1, 6.98, 2.06, 2.93
P, m_eff = 1097.45, 1.0
EF, Ecv, Evv, Ep = 0, -1519, -341, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
Ep=3/(0.063)/(3/np.abs(Ecv)+1/np.abs(Ecv+Evv))
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
elif (params=={} or params=='GaSb') and (crystal=='zincblende'):
gamma0, gamma1, gamma2, gamma3 = 1, 13.4, 4.7, 6.0
P, m_eff = 971.3, 1.0
EF, Ecv, Evv, Ep = 0, -812, -760, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
elif (params=={} or params=='InAs') and (crystal=='wurtzite'):
m_eff = 1.0
D1,D2,D3,D4=100.3,102.3,104.1,38.8
A1,A2,A3,A4,A5,A6,A7=-1.5726,-1.6521,-2.6301,0.5126,0.1172,1.3103,-49.04
B1,B2,B3=-2.3925,2.3155,-1.7231
e1,e2=-3.2005,0.6363
P1,P2=838.6,689.87
alpha1,alpha2,alpha3=-1.89,-28.92,-51.17
beta1,beta2=-6.95,-21.71
gamma1,Ec, Ev=53.06,0,-664.9
elif crystal=='minimal' or crystal=='zincblende':
gamma0, gamma1, gamma2, gamma3 = params['gamma0'], params['gamma1'], params['gamma2'], params['gamma3']
P, m_eff = params['P'], params['m_eff']
EF, Ecv, Evv = params['EF'], params['Ecv'], params['Evv']
if crystal=='zincblende':
Ep=(cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
## Make sure that the onsite parameters are arrays:
Nx, Ny = N[0], N[1]
if np.ndim(dis)==0:
dis_x, dis_y = dis, dis
else:
dis_x, dis_y = dis[0], dis[1]
if np.isscalar(mesh):
xi_x, xi_y = np.ones(N), np.ones(N)
elif len(mesh)==2:
xi_x, xi_y = dis_x/mesh[0]*np.ones(N), dis_y/mesh[1]*np.ones(N)
else:
xi_x, xi_y = dis_x/mesh[0], dis_y/mesh[1]
if np.isscalar(mu):
mu = mu * np.ones((Nx,Ny))
#Number of bands and sites
m_b = 8 * Nx * Ny
m_s = Nx * Ny
#Obtain the eigenenergies:
tx=cons.hbar**2/(2*m_eff*cons.m_e*(dis_x*1e-9)**2)/cons.e*1e3*(xi_x[1::,:]+xi_x[:-1,:])/2
ty=cons.hbar**2/(2*m_eff*cons.m_e*(dis_y*1e-9)**2)/cons.e*1e3*(xi_y[:,1::]+xi_y[:,:-1])/2
txy=cons.hbar**2/(2*m_eff*cons.m_e*(dis_x*1e-9)*(dis_y*1e-9))/cons.e*1e3*np.append(np.zeros((1,Ny)),xi_x[1::,:]+xi_x[:-1,:],axis=0)/2*np.append(np.zeros((Nx,1)),xi_y[:,1::]+xi_y[:,:-1],axis=1)/2
txy=txy[1::,1::]
ax=(xi_x[1::,:]+xi_x[:-1,:])/2/(2*dis_x)
ay=(xi_y[:,1::]+xi_y[:,:-1])/2/(2*dis_y)
e = np.append(2*tx[0,:].reshape(1,Ny),np.append(tx[1::,:]+tx[:-1,:],2*tx[-1,:].reshape(1,Ny),axis=0),axis=0)
em = e - np.append(2*ty[:,0].reshape(Nx,1),np.append(ty[:,1::]+ty[:,:-1],2*ty[:,-1].reshape(Nx,1),axis=1),axis=1)
e += np.append(2*ty[:,0].reshape(Nx,1),np.append(ty[:,1::]+ty[:,:-1],2*ty[:,-1].reshape(Nx,1),axis=1),axis=1)
ty=np.insert(ty,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
ay=np.insert(ay,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
txy=np.insert(txy,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
e, em, mu, tx, ty = e.flatten(), em.flatten(), mu.flatten(), tx.flatten(), ty.flatten()
ax,ay=ax.flatten(),ay.flatten()
if not(B==0):
x, y = np.zeros(N), np.zeros(N)
if np.isscalar(mesh) and mesh==0:
mesh=np.ones((2,Nx,Ny))*dis[0]
for i in range(Nx):
for j in range(Ny):
x[i,j]=np.sum(mesh[0,0:i+1,j])-(Nx-1)*dis_x/2
y[i,j]=np.sum(mesh[1,i,0:j+1])-(Ny-1)*dis_y/2
for i in range(int((Nx-1)/2)):
x[Nx-i-1,:]=-x[i,:]
x[int((Nx-1)/2),:]=0
x=x/np.abs(x[0,0])*(Nx-1)*dis_x/2
for j in range(int((Ny-1)/2)):
y[:,Ny-j-1]=-y[:,j]
y[:,int((Ny-1)/2)]=0
y=y/np.abs(y[0,0])*(Ny-1)*dis_y/2
fact_B=cons.e/cons.hbar*1e-18
Mx, My = -fact_B*y/2*B, fact_B*x/2*B
Mx_kx, My_ky = (xi_x[1::,:]*Mx[1::,:]+xi_x[:-1,:]*Mx[:-1,:])/2/(2*dis_x), (xi_y[:,1::]*My[:,1::]+xi_y[:,:-1]*My[:,:-1])/2/(2*dis_y)
My_ky=np.insert(My_ky,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
Mm_kx, Mm_ky = (xi_x[1::,:]*(Mx[1::,:]-1j*My[1::,:])+xi_x[:-1,:]*(Mx[:-1,:]-1j*My[:-1,:]))/2/(2*dis_x), -(xi_y[:,1::]*(Mx[:,1::]+1j*My[:,1::])+xi_y[:,:-1]*(Mx[:,:-1]+1j*My[:,:-1]))/2/(2*dis_y)
Mm_ky=np.insert(Mm_ky,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
Mx, My = Mx.flatten(), My.flatten()
Mx_kx, My_ky = Mx_kx.flatten(), My_ky.flatten()
Mm_kx, Mm_ky = Mm_kx.flatten(), Mm_ky.flatten()
    ## Build the Hamiltonian:
if crystal=='zincblende':
T=(concatenate((e,-tx,-tx,-ty,-ty)),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1))))
G1=(concatenate((P/np.sqrt(6)*ay,-P/np.sqrt(6)*ay,-1j*P/np.sqrt(6)*ax,1j*P/np.sqrt(6)*ax)),
concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
O1=(concatenate(((-1/np.sqrt(3)*(gamma2+2*gamma3))*em,-tx*(-1/np.sqrt(3)*(gamma2+2*gamma3)),-tx*(-1/np.sqrt(3)*(gamma2+2*gamma3)),
(ty*(-1/np.sqrt(3)*(gamma2+2*gamma3))),ty*(-1/np.sqrt(3)*(gamma2+2*gamma3)),-1j*txy[0:-1]/2*(-1/np.sqrt(3)*(gamma2+2*gamma3)),
(1j*txy/2*(-1/np.sqrt(3)*(gamma2+2*gamma3))),1j*txy/2*(-1/np.sqrt(3)*(gamma2+2*gamma3)),-1j*txy[0:-1]/2*(-1/np.sqrt(3)*(gamma2+2*gamma3)))),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny+1),diagonal(m_s,k=Ny-1,init=1),diagonal(m_s,k=-Ny+1,init=1),diagonal(m_s,k=-Ny-1))))
if not(B==0):
B_m=((Mx-1j*My),(diagonal(m_s)))
B_s=(((Mx**2+My**2)*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3),(diagonal(m_s)))
B_k=(concatenate((-2*1j*My_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*My_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
-2*1j*Mx_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*Mx_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3)),concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
B_s_m=(((Mx**2-My**2-2*1j*Mx*My)*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3),(diagonal(m_s)))
B_k_m=(concatenate((2*Mm_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
-2*Mm_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
-2*1j*Mm_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*Mm_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3)),concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
### Upper diagonal:
## row 0:
# (0,2)
args=G1[0]
index=(G1[1][0]+0,G1[1][1]+2*m_s)
# (0,4)
args=np.append(args,np.conj(G1[0])*np.sqrt(3))
index=(np.append(index[0],G1[1][1]+0),np.append(index[1],G1[1][0]+4*m_s))
# (0,7)
args=np.append(args,G1[0]*np.sqrt(2))
index=(np.append(index[0],G1[1][0]+0),np.append(index[1],G1[1][1]+7*m_s))
## row 1:
# (1,3)
args=np.append(args,-G1[0]*np.sqrt(3))
index=(np.append(index[0],G1[1][0]+m_s), np.append(index[1],G1[1][1]+3*m_s))
# (1,5)
args=np.append(args,-np.conj(G1[0]))
index=(np.append(index[0],G1[1][1]+m_s),np.append(index[1],G1[1][0]+5*m_s))
# (1,6)
args=np.append(args,np.sqrt(2)*np.conj(G1[0]))
index=(np.append(index[0],G1[1][1]+m_s), np.append(index[1],G1[1][0]+6*m_s))
## row 2:
# (2,4)
args=np.append(args,O1[0])
index=(np.append(index[0],O1[1][0]+2*m_s),np.append(index[1],O1[1][1]+4*m_s))
# (2,7)
args=np.append(args,-np.sqrt(2)*T[0]*gamma3)
index=(np.append(index[0],T[1][0]+2*m_s),np.append(index[1],T[1][1]+7*m_s))
## row 3:
# (3,5)
args=np.append(args,O1[0])
index=(np.append(index[0],O1[1][0]+3*m_s),np.append(index[1],O1[1][1]+5*m_s))
# (3,6)
args=np.append(args,-np.sqrt(2)*np.conj(O1[0]))
index=(np.append(index[0],O1[1][1]+3*m_s),np.append(index[1],O1[1][0]+6*m_s))
## row 4:
# (4,7)
args=np.append(args,np.sqrt(2)*np.conj(O1[0]))
index=(np.append(index[0],O1[1][1]+4*m_s),np.append(index[1],O1[1][0]+7*m_s))
## row 5:
# (5,6)
args=np.append(args,np.sqrt(2)*T[0]*gamma3)
index=(np.append(index[0],T[1][0]+5*m_s),np.append(index[1],T[1][1]+6*m_s))
        ## If there is a magnetic field:
if not(B==0):
## row 0:
# (0,2)
args=np.append(args,P/np.sqrt(6)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+0),np.append(index[1],B_m[1][0]+2*m_s))
# (0,4)
args=np.append(args,P/np.sqrt(2)*B_m[0])
index=(np.append(index[0],B_m[1][0]+0),np.append(index[1],B_m[1][1]+4*m_s))
# (0,7)
args=np.append(args,P/np.sqrt(3)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+0),np.append(index[1],B_m[1][0]+7*m_s))
## row 1:
# (1,3)
args=np.append(args,-P/np.sqrt(2)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+m_s),np.append(index[1],B_m[1][0]+3*m_s))
# (1,5)
args=np.append(args,-P/np.sqrt(6)*B_m[0])
index=(np.append(index[0],B_m[1][0]+m_s),np.append(index[1],B_m[1][1]+5*m_s))
# (1,6)
args=np.append(args,P/np.sqrt(3)*B_m[0])
index=(np.append(index[0],B_m[1][0]+m_s),np.append(index[1],B_m[1][1]+6*m_s))
## row 2:
# (2,7)
args=np.append(args,-np.sqrt(2)*gamma3*B_s[0])
index=(np.append(index[0],B_s[1][0]+2*m_s),np.append(index[1],B_s[1][1]+7*m_s))
args=np.append(args,-np.sqrt(2)*gamma3*B_k[0])
index=(np.append(index[0],B_k[1][0]+2*m_s),np.append(index[1],B_k[1][1]+7*m_s))
# (2,4)
args=np.append(args,-1/np.sqrt(3)*(gamma2+2*gamma3)*B_s_m[0])
index=(np.append(index[0],B_s_m[1][0]+2*m_s),np.append(index[1],B_s_m[1][1]+4*m_s))
args=np.append(args,-1/np.sqrt(3)*(gamma2+2*gamma3)*B_k_m[0])
index=(np.append(index[0],B_k_m[1][0]+2*m_s),np.append(index[1],B_k_m[1][1]+4*m_s))
## row 3:
# (3,5)
args=np.append(args,-1/np.sqrt(3)*(gamma2+2*gamma3)*B_s_m[0])
index=(np.append(index[0],B_s_m[1][0]+3*m_s),np.append(index[1],B_s_m[1][1]+5*m_s))
args=np.append(args,-1/np.sqrt(3)*(gamma2+2*gamma3)*B_k_m[0])
index=(np.append(index[0],B_k_m[1][0]+3*m_s),np.append(index[1],B_k_m[1][1]+5*m_s))
# (3,6)
args=np.append(args,np.sqrt(2/3)*(gamma2+2*gamma3)*np.conj(B_s_m[0]))
index=(np.append(index[0],B_s_m[1][1]+3*m_s),np.append(index[1],B_s_m[1][0]+6*m_s))
args=np.append(args,np.sqrt(2/3)*(gamma2+2*gamma3)*np.conj(B_k_m[0]))
index=(np.append(index[0],B_k_m[1][1]+3*m_s),np.append(index[1],B_k_m[1][0]+6*m_s))
## row 4:
# (4,7)
args=np.append(args,-np.sqrt(2/3)*(gamma2+2*gamma3)*np.conj(B_s_m[0]))
index=(np.append(index[0],B_s_m[1][1]+4*m_s),np.append(index[1],B_s_m[1][0]+7*m_s))
args=np.append(args,-np.sqrt(2/3)*(gamma2+2*gamma3)*np.conj(B_k_m[0]))
index=(np.append(index[0],B_k_m[1][1]+4*m_s),np.append(index[1],B_k_m[1][0]+7*m_s))
## row 5:
# (5,6)
args=np.append(args,np.sqrt(2)*gamma3*B_s[0])
index=(np.append(index[0],B_s[1][0]+5*m_s),np.append(index[1],B_s[1][1]+6*m_s))
args=np.append(args,np.sqrt(2)*gamma3*B_k[0])
index=(np.append(index[0],B_k[1][0]+5*m_s),np.append(index[1],B_k[1][1]+6*m_s))
### Lower diagonal:
args=np.append(args,np.conj(args))
index=(np.append(index[0],index[1]),np.append(index[1],index[0]))
### Diagonal:
# (0,0)
args=np.append(args,T[0])
index=(np.append(index[0],T[1][0]+0),np.append(index[1],T[1][1]+0))
# (1,1)
args=np.append(args,T[0])
index=(np.append(index[0],T[1][0]+m_s),np.append(index[1],T[1][1]+m_s))
# (2,2)
args=np.append(args,(gamma3-gamma1)*T[0])
index=(np.append(index[0],T[1][0]+2*m_s),np.append(index[1],T[1][1]+2*m_s))
# (3,3)
args=np.append(args,-(gamma3+gamma1)*T[0])
index=(np.append(index[0],T[1][0]+3*m_s),np.append(index[1],T[1][1]+3*m_s))
# (4,4)
args=np.append(args,-(gamma3+gamma1)*T[0])
index=(np.append(index[0],T[1][0]+4*m_s),np.append(index[1],T[1][1]+4*m_s))
# (5,5)
args=np.append(args,(gamma3-gamma1)*T[0])
index=(np.append(index[0],T[1][0]+5*m_s),np.append(index[1],T[1][1]+5*m_s))
# (6,6)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+6*m_s),np.append(index[1],T[1][1]+6*m_s))
# (7,7)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+7*m_s),np.append(index[1],T[1][1]+7*m_s))
if not(B==0):
# (0,0)
args=np.append(args,B_s[0])
index=(np.append(index[0],B_s[1][0]+0),np.append(index[1],B_s[1][1]+0))
args=np.append(args,B_k[0])
index=(np.append(index[0],B_k[1][0]+0),np.append(index[1],B_k[1][1]+0))
# (1,1)
args=np.append(args,B_s[0])
index=(np.append(index[0],B_s[1][0]+m_s),np.append(index[1],B_s[1][1]+m_s))
args=np.append(args,B_k[0])
index=(np.append(index[0],B_k[1][0]+m_s),np.append(index[1],B_k[1][1]+m_s))
# (2,2)
args=np.append(args,(gamma3-gamma1)*B_s[0])
index=(np.append(index[0],B_s[1][0]+2*m_s),np.append(index[1],B_s[1][1]+2*m_s))
args=np.append(args,(gamma3-gamma1)*B_k[0])
index=(np.append(index[0],B_k[1][0]+2*m_s),np.append(index[1],B_k[1][1]+2*m_s))
# (3,3)
args=np.append(args,-(gamma3+gamma1)*B_s[0])
index=(np.append(index[0],B_s[1][0]+3*m_s),np.append(index[1],B_s[1][1]+3*m_s))
args=np.append(args,-(gamma3-gamma1)*B_k[0])
index=(np.append(index[0],B_k[1][0]+3*m_s),np.append(index[1],B_k[1][1]+3*m_s))
# (4,4)
args=np.append(args,-(gamma3+gamma1)*B_s[0])
index=(np.append(index[0],B_s[1][0]+4*m_s),np.append(index[1],B_s[1][1]+4*m_s))
args=np.append(args,-(gamma3-gamma1)*B_k[0])
index=(np.append(index[0],B_k[1][0]+4*m_s),np.append(index[1],B_k[1][1]+4*m_s))
# (5,5)
args=np.append(args,(gamma3-gamma1)*B_s[0])
index=(np.append(index[0],B_s[1][0]+5*m_s),np.append(index[1],B_s[1][1]+5*m_s))
args=np.append(args,(gamma3-gamma1)*B_k[0])
index=(np.append(index[0],B_k[1][0]+5*m_s),np.append(index[1],B_k[1][1]+5*m_s))
# (6,6)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+6*m_s),np.append(index[1],B_s[1][1]+6*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+6*m_s),np.append(index[1],B_k[1][1]+6*m_s))
# (7,7)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+7*m_s),np.append(index[1],B_s[1][1]+7*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+7*m_s),np.append(index[1],B_k[1][1]+7*m_s))
        ### Build the matrix:
H=scipy.sparse.csc_matrix((args,index),shape=(m_b,m_b))
if sparse=='no':
H=H.todense()
### Add potential and band edges:
H[diagonal(m_b)]+=-np.tile(mu,8) + concatenate((EF*np.ones(2*m_s),Ecv*np.ones(4*m_s),(Ecv+Evv)*np.ones(2*m_s)))
elif crystal=='wurtzite':
Kc=(concatenate((e,-tx,-tx,-ty,-ty)),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1))))
Kp=(concatenate((ay,-ay,-1j*ax,1j*ax)),
concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
Kpc=(concatenate((em,-tx,-tx,ty,ty,-1j*txy[0:-1]/2,1j*txy/2,1j*txy/2,-1j*txy[0:-1]/2)),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny+1),diagonal(m_s,k=Ny-1,init=1),diagonal(m_s,k=-Ny+1,init=1),diagonal(m_s,k=-Ny-1))))
### Upper diagonal:
## row 0:
# (0,1)
args=-A5*np.conj(Kpc[0])
index=(Kpc[1][1]+0,Kpc[1][0]+m_s)
# (0,2)
args=np.append(args,1j*(A7-alpha1/np.sqrt(2))*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+0),np.append(index[1],Kp[1][0]+2*m_s))
# (0,4)
args=np.append(args,-1j*alpha2*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+0),np.append(index[1],Kp[1][0]+4*m_s))
# (0,6)
args=np.append(args,-(P2-beta1)/np.sqrt(2)*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+0),np.append(index[1],Kp[1][0]+6*m_s))
## row 1:
# (1,2)
args=np.append(args,-1j*(A7+alpha1/np.sqrt(2))*Kp[0])
index=(np.append(index[0],Kp[1][0]+m_s),np.append(index[1],Kp[1][1]+2*m_s))
# (1,3)
args=np.append(args,-1j*alpha2*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+m_s),np.append(index[1],Kp[1][0]+3*m_s))
# (1,5)
args=np.append(args,np.sqrt(2)*D3*np.ones(m_s))
index=(np.append(index[0],diagonal(m_s)[0]+m_s),np.append(index[1],diagonal(m_s)[1]+5*m_s))
# (1,6)
args=np.append(args,(P2+beta1)/np.sqrt(2)*Kp[0])
index=(np.append(index[0],Kp[1][0]+m_s),np.append(index[1],Kp[1][1]+6*m_s))
# (1,7)
args=np.append(args,1j*np.sqrt(2)*D4*np.ones(m_s))
index=(np.append(index[0],diagonal(m_s)[0]+m_s),np.append(index[1],diagonal(m_s)[1]+7*m_s))
## row 2:
# (2,4)
args=np.append(args,np.sqrt(2)*D3*np.ones(m_s))
index=(np.append(index[0],diagonal(m_s)[0]+2*m_s),np.append(index[1],diagonal(m_s)[1]+4*m_s))
# (2,5)
args=np.append(args,-1j*alpha3*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+2*m_s),np.append(index[1],Kp[1][0]+5*m_s))
# (2,6)
args=np.append(args, 1j*B2*Kc[0])
index=(np.append(index[0],Kc[1][0]+2*m_s),np.append(index[1],Kc[1][1]+6*m_s))
# (2,7)
args=np.append(args, beta2*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+2*m_s),np.append(index[1],Kp[1][0]+7*m_s))
## row 3:
# (3,4)
args=np.append(args,-A5*Kpc[0])
index=(np.append(index[0],Kpc[1][0]+3*m_s),np.append(index[1],Kpc[1][1]+4*m_s))
# (3,5)
args=np.append(args,-1j*(A7-alpha1/np.sqrt(2))*Kp[0])
index=(np.append(index[0],Kp[1][0]+3*m_s),np.append(index[1],Kp[1][1]+5*m_s))
# (3,7)
args=np.append(args,(P2-beta1)/np.sqrt(2)*Kp[0])
index=(np.append(index[0],Kp[1][0]+3*m_s),np.append(index[1],Kp[1][1]+7*m_s))
## row 4:
# (4,5)
args=np.append(args,1j*(A7+alpha1/np.sqrt(2))*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+4*m_s),np.append(index[1],Kp[1][0]+5*m_s))
# (4,6)
args=np.append(args,1j*np.sqrt(2)*D4*np.ones(m_s))
index=(np.append(index[0],diagonal(m_s)[0]+4*m_s),np.append(index[1],diagonal(m_s)[1]+6*m_s))
# (4,7)
args=np.append(args,-(P2+beta1)/np.sqrt(2)*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+4*m_s),np.append(index[1],Kp[1][0]+7*m_s))
## row 5:
# (5,6)
args=np.append(args,-beta2*Kp[0])
index=(np.append(index[0],Kp[1][0]+5*m_s),np.append(index[1],Kp[1][1]+6*m_s))
# (5,7)
args=np.append(args, 1j*B2*Kc[0])
index=(np.append(index[0],Kc[1][0]+5*m_s),np.append(index[1],Kc[1][1]+7*m_s))
## row 6:
# (6,7)
args=np.append(args,-1j*gamma1*np.conj(Kp[0]))
index=(np.append(index[0],Kp[1][1]+6*m_s),np.append(index[1],Kp[1][0]+7*m_s))
### Lower diagonal:
args=np.append(args,np.conj(args))
index=(np.append(index[0],index[1]),np.append(index[1],index[0]))
### Diagonal:
# (0,0)
args=np.append(args,(A2+A4)*Kc[0])
index=(np.append(index[0],Kc[1][0]+0),np.append(index[1],Kc[1][1]+0))
# (1,1)
args=np.append(args,(A2+A4)*Kc[0])
index=(np.append(index[0],Kc[1][0]+m_s),np.append(index[1],Kc[1][1]+m_s))
# (2,2)
args=np.append(args,(A2)*Kc[0])
index=(np.append(index[0],Kc[1][0]+2*m_s),np.append(index[1],Kc[1][1]+2*m_s))
# (3,3)
args=np.append(args,(A2+A4)*Kc[0])
index=(np.append(index[0],Kc[1][0]+3*m_s),np.append(index[1],Kc[1][1]+3*m_s))
# (4,4)
args=np.append(args,(A2+A4)*Kc[0])
index=(np.append(index[0],Kc[1][0]+4*m_s),np.append(index[1],Kc[1][1]+4*m_s))
# (5,5)
args=np.append(args,(A2)*Kc[0])
index=(np.append(index[0],Kc[1][0]+5*m_s),np.append(index[1],Kc[1][1]+5*m_s))
# (6,6)
args=np.append(args,(e2)*Kc[0])
index=(np.append(index[0],Kc[1][0]+6*m_s),np.append(index[1],Kc[1][1]+6*m_s))
# (7,7)
args=np.append(args,(e2)*Kc[0])
index=(np.append(index[0],Kc[1][0]+7*m_s),np.append(index[1],Kc[1][1]+7*m_s))
        ### Build the matrix:
H=scipy.sparse.csc_matrix((args,index),shape=(m_b,m_b))
if sparse=='no':
H=H.todense()
### Add potential and band edges:
H[diagonal(m_b)]+=-np.tile(mu,8) + concatenate(((D1+D2+Ev)*np.ones(m_s),(D1-D2+Ev)*np.ones(m_s),(Ev)*np.ones(m_s),
(D1+D2+Ev)*np.ones(m_s),(D1-D2+Ev)*np.ones(m_s),(Ev)*np.ones(m_s),
(Ec)*np.ones(m_s),(Ec)*np.ones(m_s)))
elif crystal=='minimal':
T=(concatenate((e,-tx,-tx,-ty,-ty)),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1))))
G1=(concatenate((P/np.sqrt(6)*ay,-P/np.sqrt(6)*ay,-1j*P/np.sqrt(6)*ax,1j*P/np.sqrt(6)*ax)),
concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
if not(B==0):
B_m=((Mx-1j*My),(diagonal(m_s)))
B_s=(((Mx**2+My**2)*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3),(diagonal(m_s)))
B_k=(concatenate((-2*1j*My_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*My_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
-2*1j*Mx_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*Mx_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3)),concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
### Upper diagonal:
## row 0:
# (0,2)
args=G1[0]
index=(G1[1][0]+0,G1[1][1]+2*m_s)
# (0,4)
args=np.append(args,np.conj(G1[0])*np.sqrt(3))
index=(np.append(index[0],G1[1][1]+0),np.append(index[1],G1[1][0]+4*m_s))
# (0,7)
args=np.append(args,G1[0]*np.sqrt(2))
index=(np.append(index[0],G1[1][0]+0),np.append(index[1],G1[1][1]+7*m_s))
## row 1:
# (1,3)
args=np.append(args,-G1[0]*np.sqrt(3))
index=(np.append(index[0],G1[1][0]+m_s), np.append(index[1],G1[1][1]+3*m_s))
# (1,5)
args=np.append(args,-np.conj(G1[0]))
index=(np.append(index[0],G1[1][1]+m_s),np.append(index[1],G1[1][0]+5*m_s))
# (1,6)
args=np.append(args,np.sqrt(2)*np.conj(G1[0]))
index=(np.append(index[0],G1[1][1]+m_s), np.append(index[1],G1[1][0]+6*m_s))
        ## If there is a magnetic field:
if not(B==0):
## row 0:
# (0,2)
args=np.append(args,P/np.sqrt(6)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+0),np.append(index[1],B_m[1][0]+2*m_s))
# (0,4)
args=np.append(args,P/np.sqrt(2)*B_m[0])
index=(np.append(index[0],B_m[1][0]+0),np.append(index[1],B_m[1][1]+4*m_s))
# (0,7)
args=np.append(args,P/np.sqrt(3)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+0),np.append(index[1],B_m[1][0]+7*m_s))
## row 1:
# (1,3)
args=np.append(args,-P/np.sqrt(2)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+m_s),np.append(index[1],B_m[1][0]+3*m_s))
# (1,5)
args=np.append(args,-P/np.sqrt(6)*B_m[0])
index=(np.append(index[0],B_m[1][0]+m_s),np.append(index[1],B_m[1][1]+5*m_s))
# (1,6)
args=np.append(args,P/np.sqrt(3)*B_m[0])
index=(np.append(index[0],B_m[1][0]+m_s),np.append(index[1],B_m[1][1]+6*m_s))
### Lower diagonal:
args=np.append(args,np.conj(args))
index=(np.append(index[0],index[1]),np.append(index[1],index[0]))
### Diagonal:
# (0,0)
args=np.append(args,gamma0*T[0])
index=(np.append(index[0],T[1][0]+0),np.append(index[1],T[1][1]+0))
# (1,1)
args=np.append(args,gamma0*T[0])
index=(np.append(index[0],T[1][0]+m_s),np.append(index[1],T[1][1]+m_s))
# (2,2)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+2*m_s),np.append(index[1],T[1][1]+2*m_s))
# (3,3)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+3*m_s),np.append(index[1],T[1][1]+3*m_s))
# (4,4)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+4*m_s),np.append(index[1],T[1][1]+4*m_s))
# (5,5)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+5*m_s),np.append(index[1],T[1][1]+5*m_s))
# (6,6)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+6*m_s),np.append(index[1],T[1][1]+6*m_s))
# (7,7)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+7*m_s),np.append(index[1],T[1][1]+7*m_s))
if not(B==0):
# (0,0)
args=np.append(args,gamma0*B_s[0])
index=(np.append(index[0],B_s[1][0]+0),np.append(index[1],B_s[1][1]+0))
args=np.append(args,gamma0*B_k[0])
index=(np.append(index[0],B_k[1][0]+0),np.append(index[1],B_k[1][1]+0))
# (1,1)
args=np.append(args,gamma0*B_s[0])
index=(np.append(index[0],B_s[1][0]+m_s),np.append(index[1],B_s[1][1]+m_s))
args=np.append(args,gamma0*B_k[0])
index=(np.append(index[0],B_k[1][0]+m_s),np.append(index[1],B_k[1][1]+m_s))
# (2,2)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+2*m_s),np.append(index[1],B_s[1][1]+2*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+2*m_s),np.append(index[1],B_k[1][1]+2*m_s))
# (3,3)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+3*m_s),np.append(index[1],B_s[1][1]+3*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+3*m_s),np.append(index[1],B_k[1][1]+3*m_s))
# (4,4)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+4*m_s),np.append(index[1],B_s[1][1]+4*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+4*m_s),np.append(index[1],B_k[1][1]+4*m_s))
# (5,5)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+5*m_s),np.append(index[1],B_s[1][1]+5*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+5*m_s),np.append(index[1],B_k[1][1]+5*m_s))
# (6,6)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+6*m_s),np.append(index[1],B_s[1][1]+6*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+6*m_s),np.append(index[1],B_k[1][1]+6*m_s))
# (7,7)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+7*m_s),np.append(index[1],B_s[1][1]+7*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+7*m_s),np.append(index[1],B_k[1][1]+7*m_s))
        ### Build the matrix:
H=scipy.sparse.csc_matrix((args,index),shape=(m_b,m_b))
if sparse=='no':
H=H.todense()
### Add potential and band edges:
H[diagonal(m_b)]+=-np.tile(mu,8) + concatenate((EF*np.ones(2*m_s),Ecv*np.ones(4*m_s),(Ecv+Evv)*np.ones(2*m_s)))
return (H)
| 3,423 |
def has_loop(edges, threshold=2):
""" check if a list of edges representing a directed graph contains a loop
args:
        edges: list of edge tuples representing a directed graph, e.g. [(1, 2), (2, 1)]
threshold: min number of nodes contained in loop
returns:
bool
"""
g = nx.DiGraph()
g.add_edges_from(edges)
return any(len(comp) >= threshold for comp in strongly_connected_components(g))
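For example (assuming networkx is imported as nx and strongly_connected_components refers to networkx's implementation, as the code above implies):

print(has_loop([(1, 2), (2, 3), (3, 1)]))   # True: 1 -> 2 -> 3 -> 1 is a cycle of length 3
print(has_loop([(1, 2), (2, 3)]))           # False: no strongly connected component with >= 2 nodes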
| 3,424 |
async def test_supported_features(hass):
"""Test supported features reporting."""
pause_play_stop = SUPPORT_PAUSE | SUPPORT_PLAY | SUPPORT_STOP
play_media = SUPPORT_PLAY_MEDIA
volume = SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_STEP
await async_setup_component(
hass,
MEDIA_DOMAIN,
{
MEDIA_DOMAIN: {
"platform": DOMAIN,
"entities": ["media_player.player_1", "media_player.player_2"],
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
hass.states.async_set(
"media_player.player_1", STATE_ON, {ATTR_SUPPORTED_FEATURES: 0}
)
await hass.async_block_till_done()
state = hass.states.get("media_player.media_group")
assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
hass.states.async_set(
"media_player.player_1",
STATE_ON,
{ATTR_SUPPORTED_FEATURES: pause_play_stop},
)
await hass.async_block_till_done()
state = hass.states.get("media_player.media_group")
assert state.attributes[ATTR_SUPPORTED_FEATURES] == pause_play_stop
hass.states.async_set(
"media_player.player_2",
STATE_OFF,
{ATTR_SUPPORTED_FEATURES: play_media | volume},
)
await hass.async_block_till_done()
state = hass.states.get("media_player.media_group")
assert (
state.attributes[ATTR_SUPPORTED_FEATURES]
== pause_play_stop | play_media | volume
)
hass.states.async_set(
"media_player.player_2", STATE_OFF, {ATTR_SUPPORTED_FEATURES: play_media}
)
await hass.async_block_till_done()
state = hass.states.get("media_player.media_group")
assert state.attributes[ATTR_SUPPORTED_FEATURES] == pause_play_stop | play_media
| 3,425 |
def connect(host=None, dbname=None, user=None, password=None, minconn=1,
maxconn=4):
"""
Attempts to connect to Postgres.
"""
if not any((host, dbname, user, password)):
host, dbname, user, password = get_db_env()
if not any((host, dbname, user, password)):
raise Exception('No database connection provided or configured.')
return ThreadedConnectionPool(minconn, maxconn, host=host, dbname=dbname,
user=user, password=password)
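A usage sketch (connection parameters are placeholders; the returned pool is psycopg2's ThreadedConnectionPool, so connections are checked out with getconn and handed back with putconn):

pool = connect(host="localhost", dbname="appdb", user="app", password="secret")
conn = pool.getconn()
try:
    with conn.cursor() as cur:
        cur.execute("SELECT 1")
        print(cur.fetchone())   # (1,)
finally:
    pool.putconn(conn)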
| 3,426 |
def animate( data_cube, slit_data=None, slit_cmap="viridis", raster_pos=None, index_start=None, index_stop=None, interval_ms=50, gamma=0.4, figsize=(7,7), cutoff_percentile=99.9, save_path=None ):
"""
Creates an animation from the individual images of a data cube.
This function can be pretty slow and take 1-2 minutes.
Faster alternatives than matplotlib will be researched in the future.
Parameters
----------
data_cube : iris_data_cube
instance of sji_cube or raster_cube
slit_data : numpy.array
array with shape [n_steps, n_y] that is drawn on the slit for each step
slit_cmap : str
colormap to use for the visualisation of slit_data
raster_pos : int
        If not None, only display images at raster position *raster_pos*
index_start : int
index where to start animation (defaults to None -> will be set to 0)
index_stop : int
index where to stop animation (defaults to None -> will be set to n)
interval_ms : int
number of milliseconds between two frames
gamma : float
        gamma correction for plotting: number between 0 (strongest gamma correction) and 1 (no gamma correction)
figsize : tuple
figure size: (width,height)
cutoff_percentile : float
Often the maximum pixels shine out everything else, even after gamma correction. In order to reduce
this effect, the percentile at which to cut the intensity off can be specified with cutoff_percentile
in a range between 0 and 100.
save_path : str
path to file where animation output will be written to (use .mp4 extension)
Returns
-------
IPython.HTML :
HTML object with the animation
"""
# get number of steps
if raster_pos is None:
n = data_cube.shape[0]
else:
n = data_cube.get_raster_pos_steps( raster_pos )
# set default values for index_start and index_stop
if index_start is None:
index_start=0
if index_stop is None:
index_stop=n
# raise exception if there is a problem with i_start / i_stop
if index_stop > n or index_stop <= index_start:
raise Exception("Please make sure that index_start < index_stop < n_steps")
# release a duration warning
if index_stop-index_start > 100 and ir.config.verbosity_level >= 1:
print( "Creating animation with {} frames (this may take while)".format(index_stop-index_start) )
# initialize plot
fig = plt.figure( figsize=figsize )
image = data_cube.get_image_step( 0, raster_pos ).clip(min=0.01)**gamma
vmax = np.percentile( image, cutoff_percentile )
im = plt.imshow( image, cmap="gist_heat", vmax=vmax, origin='lower', interpolation="none" )
if slit_data is not None:
slit_pos = data_cube.get_slit_pos(0)
line = plt.scatter(
[slit_pos]*image.shape[0], np.arange(image.shape[0]),
c=slit_data[0,:], s=5, cmap=slit_cmap, marker='_',
vmin=np.min(slit_data), vmax=np.max(slit_data)
)
plt.colorbar()
# do nothing in the initialization function
def init():
return im,
# animation function
def animate(i, index_start):
xcenix = data_cube.headers[i+index_start]['XCENIX']
ycenix = data_cube.headers[i+index_start]['YCENIX']
date_obs = data_cube.headers[i+index_start]['DATE_OBS']
im.axes.set_title( "Frame {}: {}\nXCENIX: {:.3f}, YCENIX: {:.3f}".format( i+index_start, date_obs, xcenix, ycenix ) )
im.set_data( data_cube.get_image_step( i+index_start, raster_pos ).clip(min=0.01)**gamma )
if slit_data is not None:
slit_pos = data_cube.get_slit_pos(i)
line_data = np.vstack([[slit_pos]*image.shape[0], np.arange(image.shape[0])]).T
line.set_offsets(line_data)
line.set_array(slit_data[i,:])
return im,
# Call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, lambda i: animate(i, index_start), init_func=init, frames=index_stop-index_start, interval=interval_ms, blit=True)
# Close the plot
plt.close(anim._fig)
# Save animation if requested
if save_path is not None:
anim.save( save_path )
return HTML(anim.to_html5_video())
| 3,427 |
def build_vrt(in_vrts, out_vrt, pixel_function):
"""
in_vrts must be a list
out_vrt must be a path/to/filename.vrt
"""
projection, geotransform, raster_size_x, raster_size_y = get_info_vrt(in_vrts[0])
write_vrt(in_vrts, out_vrt, projection, geotransform, raster_size_x, raster_size_y, pixel_function)
| 3,428 |
def tau_profile(ncols,vshifts,vdop,which_line,wave_cut_off=2.0):
"""
    Computes a Voigt opacity profile for H I, D I, or Mg II h & k given column density,
    velocity centroid, and b parameter.
"""
## defining rest wavelength, oscillator strength, and damping parameter
if which_line == 'h1':
lam0s,fs,gammas=1215.67,0.4161,6.26e8
elif which_line == 'd1':
lam0s,fs,gammas=1215.3394,0.4161,6.27e8
elif which_line == 'mg2_h':
lam0s,fs,gammas=2796.3543,6.155E-01,2.625E+08
elif which_line == 'mg2_k':
lam0s,fs,gammas=2803.5315,3.058E-01,2.595E+08
else:
raise ValueError("which_line can only equal 'h1' or 'd1'!")
Ntot=10.**ncols # column density of H I gas
nlam=4000 # number of elements in the wavelength grid
xsections_onesided=np.zeros(nlam) # absorption cross sections as a
                              # function of wavelength (one side of transition)
u_parameter=np.zeros(nlam) # Voigt "u" parameter
nu0s=ccgs/(lam0s*1e-8) # wavelengths of Lyman alpha in frequency
nuds=nu0s*vdop/c_km # delta nus based off vdop parameter
a_parameter = np.abs(gammas/(4.*np.pi*nuds) ) # Voigt "a" parameter -- damping parameter
xsections_nearlinecenter = np.sqrt(np.pi)*(e**2)*fs*lam0s/(me*ccgs*vdop*1e13) # cross-sections
# near Lyman line center
wave_edge=lam0s - wave_cut_off # define wavelength cut off - this is important for the brightest lines and should be increased appropriately.
wave_symmetrical=np.zeros(2*nlam-1) # huge wavelength array centered around a Lyman transition
wave_onesided = np.zeros(nlam) # similar to wave_symmetrical, but not centered
# around a Lyman transition
lamshifts=lam0s*vshifts/c_km # wavelength shifts from vshifts parameter
## find end point for wave_symmetrical array and create wave_symmetrical array
num_elements = 2*nlam - 1
first_point = wave_edge
mid_point = lam0s
end_point = 2*(mid_point - first_point) + first_point
wave_symmetrical = np.linspace(first_point,end_point,num=num_elements)
wave_onesided = np.linspace(lam0s,wave_edge,num=nlam)
freq_onesided = ccgs / (wave_onesided*1e-8) ## convert "wave_onesided" array to a frequency array
u_parameter = (freq_onesided-nu0s)/nuds ## Voigt "u" parameter -- dimensionless frequency offset
xsections_onesided=xsections_nearlinecenter*voigt.voigt(a_parameter,u_parameter) ## cross-sections
# single sided
## can't do symmetrical
xsections_onesided_flipped = xsections_onesided[::-1]
## making the cross-sections symmetrical
xsections_symmetrical=np.append(xsections_onesided_flipped[0:nlam-1],xsections_onesided)
deltalam=np.max(wave_symmetrical)-np.min(wave_symmetrical)
dellam=wave_symmetrical[1]-wave_symmetrical[0]
nall=np.round(deltalam/dellam)
wave_all=deltalam*(np.arange(nall)/(nall-1))+wave_symmetrical[0]
tau_all = np.interp(wave_all,wave_symmetrical+lamshifts,xsections_symmetrical*Ntot)
return wave_all,tau_all
| 3,429 |
def main(args):
"""
chandl's entry point.
:param args: Command-line arguments, with the program in position 0.
"""
args = _parse_args(args)
# sort out logging output and level
level = util.log_level_from_vebosity(args.verbosity)
root = logging.getLogger()
root.setLevel(level)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(level)
handler.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
root.addHandler(handler)
if level != logging.DEBUG:
requests.packages.urllib3.disable_warnings()
logger.debug(args)
try:
thread = Thread.from_url(args.url)
except (ValueError, IOError) as e:
_print_error('Error retrieving thread: {0}'.format(e))
return 1
posts = thread.posts
logger.debug('Thread contains %d posts', len(posts))
posts = _remove_unwanted(posts, args)
logger.debug('Will download %d posts', len(posts))
# check whether we still have anything to do
if not posts:
print('All files are either filtered out or excluded')
return 0
# use the first post to validate the --name
try:
post = posts[0]
args.name.format(**post.__dict__)
except KeyError as e:
_print_error('Invalid file name specifier: {0}'.format(e))
return 2
# set an appropriate thread_dir if one was not specified
if not args.thread_dir:
args.thread_dir = util.make_filename(thread.title)
# create --thread-dir
write_dir = os.path.abspath(os.path.join(args.output_dir, args.thread_dir))
if not os.path.isdir(write_dir):
try:
os.mkdir(write_dir, 0o700)
except OSError as e:
_print_error(
'Failed to create the thread directory at {0}: {1}'.format(
write_dir, e))
return 3
    # show a relative path if there is a common directory (below root) between
# the `pwd` and the write_dir, otherwise show the absolute path
display_path = write_dir \
if os.path.dirname(os.path.commonprefix([write_dir,
os.getcwd()])) == '/' \
else os.path.relpath(write_dir, os.getcwd())
# download the files
print('Saving \'{0}\' to \'{1}\''.format(thread.title, display_path))
downloader = Downloader(write_dir, args.name, args.parallelism)
print(downloader.download(posts, level >= logging.WARNING))
return 0
| 3,430 |
def welcome_page():
""" On-boarding page
"""
g.project.update_on_boarding_state()
if g.project.on_boarding['import']:
return redirect(url_for('data_manager_blueprint.tasks_page'))
return flask.render_template(
'welcome.html',
config=g.project.config,
project=g.project,
on_boarding=g.project.on_boarding
)
| 3,431 |
def remove_missing_entries(dataset):
"""Remove missing entries.
Some of the datasets have missing entries that sneak in as zero'd out
feature vectors. Get rid of them.
"""
for i, (X, y, w, ids) in enumerate(dataset.itershards()):
available_rows = X.any(axis=1)
X = X[available_rows]
y = y[available_rows]
w = w[available_rows]
ids = ids[available_rows]
dataset.set_shard(i, X, y, w, ids)
| 3,432 |
def brokerUrl(host):
"""We use a different brokerUrl when running the workers than when
running within the flask app. Generate an appropriate URL with that in
mind"""
return '{broker_scheme}://{username}:{password}@{host}:{port}//'.format(
host=host, **CONFIG_JOB_QUEUE)
| 3,433 |
def displayStat(statDict):
"""
    Display formatted result on screen
args :
- statDict (dict): data to display
"""
totalOccur = sum(occur for word, occur in statDict["wordcount"])
print "\n"
print "{:<20} : {:>6}".format("Number of lines", statDict["nbLines"])
print "{:<20} : {:>6}".format("Number of words", statDict["nbWords"])
print "{:<20} : {:>6}".format("Number of characters", statDict["nbChars"])
print "\n"
print "-" * 46
print "{:^27} {:^7s} {:^10s}".format("WORD", "TIMES", "SCORE(%)")
print "-" * 46
for word, occur in statDict["wordcount"]:
print "{:<27} {:>7d} {:>10.2f}".format(word, occur, float(occur * 100)/totalOccur)
print "\n"
| 3,434 |
def show_lat_lon_gps(
move_data,
kind='scatter',
figsize=(21, 9),
plot_start_and_end=True,
return_fig=True,
save_fig=False,
name='show_gps_points.png',
):
"""
Generate a visualization with points [lat, lon] of dataset.
Parameters
----------
move_data : pymove.core.MoveDataFrameAbstract subclass.
Input trajectory data.
    kind : String, optional, default 'scatter'.
        Represents the chart type.
    figsize : tuple, optional, default (21,9).
        Represents dimensions of figure.
    plot_start_and_end: boolean
        Whether to feature the start and end of the trajectory
    return_fig : bool, optional, default True.
        Represents whether or not to return the generated figure.
    save_fig : bool, optional, default False.
        Represents whether or not to save the generated picture.
    name : String, optional, default 'show_gps_points.png'.
        Represents name of a file.
Returns
-------
matplotlib.pyplot.figure or None
The generated picture.
"""
try:
if LATITUDE in move_data and LONGITUDE in move_data:
fig = move_data.drop_duplicates([LATITUDE, LONGITUDE]).plot(
kind=kind, x=LONGITUDE, y=LATITUDE, figsize=figsize
)
if plot_start_and_end:
plt.plot(
move_data.iloc[0][LONGITUDE],
move_data.iloc[0][LATITUDE],
'yo',
markersize=10,
) # start point
plt.plot(
move_data.iloc[-1][LONGITUDE],
move_data.iloc[-1][LATITUDE],
'yX',
markersize=10,
) # end point
if save_fig:
plt.savefig(name)
if return_fig:
return fig
except Exception as exception:
raise exception
| 3,435 |
def filter_seqlets(seqlet_acts, seqlet_intervals, genome_fasta_file, end_distance=100, verbose=True):
""" Filter seqlets by valid chromosome coordinates. """
# read chromosome lengths
chr_lengths = {}
genome_fasta_open = pysam.Fastafile(genome_fasta_file)
for chrom in genome_fasta_open.references:
chr_lengths[chrom] = genome_fasta_open.get_reference_length(chrom)
genome_fasta_open.close()
# check coordinates
filter_mask = np.zeros(len(seqlet_intervals), dtype='bool')
for si, seq_int in enumerate(seqlet_intervals):
left_valid = (seq_int.start > end_distance)
right_valid = (seq_int.end + end_distance < chr_lengths[seq_int.chr])
filter_mask[si] = left_valid and right_valid
if verbose:
print('Removing %d seqlets near chromosome ends.' % (len(seqlet_intervals) - filter_mask.sum()))
# filter
seqlet_acts = seqlet_acts[filter_mask]
seqlet_intervals = [seq_int for si, seq_int in enumerate(seqlet_intervals) if filter_mask[si]]
return seqlet_acts, seqlet_intervals
| 3,436 |
def load_RIMO(path, comm=None):
"""
Load and broadcast the reduced instrument model,
a.k.a. focal plane database.
"""
# Read database, parse and broadcast
if comm is not None:
comm.Barrier()
timer = Timer()
timer.start()
RIMO = {}
if comm is None or comm.rank == 0:
print("Loading RIMO from {}".format(path), flush=True)
hdulist = pf.open(path, "readonly")
detectors = hdulist[1].data.field("detector").ravel()
phi_uvs = hdulist[1].data.field("phi_uv").ravel()
theta_uvs = hdulist[1].data.field("theta_uv").ravel()
psi_uvs = hdulist[1].data.field("psi_uv").ravel()
psi_pols = hdulist[1].data.field("psi_pol").ravel()
epsilons = hdulist[1].data.field("epsilon").ravel()
fsamples = hdulist[1].data.field("f_samp").ravel()
fknees = hdulist[1].data.field("f_knee").ravel()
alphas = hdulist[1].data.field("alpha").ravel()
nets = hdulist[1].data.field("net").ravel()
fwhms = hdulist[1].data.field("fwhm").ravel()
for i in range(len(detectors)):
phi = (phi_uvs[i]) * degree
theta = theta_uvs[i] * degree
# Make sure we don't double count psi rotation already
# included in phi
psi = (psi_uvs[i] + psi_pols[i]) * degree - phi
quat = np.zeros(4)
# ZYZ conversion from
# http://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19770024290.pdf
# Note: The above document has the scalar part of the quaternion at
# first position but quaternionarray module has it at the end, we
# use the quaternionarray convention
# scalar part:
quat[3] = np.cos(0.5 * theta) * np.cos(0.5 * (phi + psi))
# vector part
quat[0] = -np.sin(0.5 * theta) * np.sin(0.5 * (phi - psi))
quat[1] = np.sin(0.5 * theta) * np.cos(0.5 * (phi - psi))
quat[2] = np.cos(0.5 * theta) * np.sin(0.5 * (phi + psi))
# apply the bore sight rotation to the detector quaternion
quat = qa.mult(SPINROT, quat)
RIMO[detectors[i]] = DetectorData(
detectors[i],
phi_uvs[i],
theta_uvs[i],
psi_uvs[i],
psi_pols[i],
epsilons[i],
fsamples[i],
fknees[i],
alphas[i],
nets[i],
fwhms[i],
quat,
)
hdulist.close()
if comm is not None:
RIMO = comm.bcast(RIMO, root=0)
if comm is None or comm.rank == 0:
timer.report_clear("Load and broadcast RIMO")
return RIMO
| 3,437 |
def local_shuffle(bed, loc='500000'):
"""
Randomize the location of each interval in `bed` by moving its
start location to within `loc` bp of its current location or to
its containing interval in `loc`.
Arguments:
bed - input bed file
loc - shuffle intervals to within this distance (+ or -).
If not an integer, then this should be a BED file containing
regions such that each interval in `bed` is shuffled within
its containing interval in `loc`
"""
from random import randint
if str(loc).isdigit():
dist = abs(int(loc))
with nopen(bed) as fh:
for toks in (l.rstrip('\r\n').split('\t') for l in fh):
d = randint(-dist, dist)
toks[1:3] = [str(max(0, int(bloc) + d)) for bloc in toks[1:3]]
print("\t".join(toks))
else:
# we are using dist as the windows within which to shuffle
assert os.path.exists(loc)
bed4 = mktemp()
with open(bed4, 'w') as fh:
# this step is so we don't have to track the number of columns in A
for toks in reader(bed, header=False):
fh.write("%s\t%s\n" % ("\t".join(toks[:3]), SEP.join(toks)))
missing = 0
# we first find the b-interval that contains each a-interval by
# using bedtools intersect
for toks in reader("|bedtools intersect -wao -a {bed4} -b {loc}"
.format(**locals()), header=False):
ajoin = toks[:4]
a = ajoin[3].split(SEP) # extract the full interval
b = toks[4:]
if int(b[-1]) == 0:
missing += 1
continue
assert a[0] == b[0], ('chroms dont match', a, b)
alen = int(a[2]) - int(a[1])
# doesn't care if the new interval is completely contained in b
astart = randint(int(b[1]), int(b[2]))
# subtract half the time.
aend = (astart - alen) if randint(0, 1) == 0 and astart > alen \
else (astart + alen)
a[1], a[2] = map(str, (astart, aend) if astart < aend
else (aend, astart))
print("\t".join(a))
if missing > 0:
print >> sys.stderr, ("found {missing} intervals in {bed} that "
" were not contained in {loc}"
.format(**locals()))
| 3,438 |
def format_top(data):
"""
Format "top" output
:param data: dict
:return: list
"""
result = []
if data:
if 'Titles' in data:
result.append(data['Titles'])
if 'Processes' in data:
for process in data['Processes']:
result.append(process)
result = tabulate(result, headers='firstrow').split('\n')
return result
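# Illustrative usage sketch for format_top() above; the payload shape mirrors a
# docker-style "top" response (the sample dict contents are made up).
def _example_format_top():
    """Sketch: format a minimal "top" payload into tabulated text lines."""
    data = {
        "Titles": ["PID", "USER", "COMMAND"],
        "Processes": [["1", "root", "nginx"], ["42", "www-data", "worker"]],
    }
    return format_top(data)  # list of lines produced by tabulate()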
| 3,439 |
def process_arguments(parser):
"""This function parses the input arguments."""
args = parser.parse_args()
# Distribute input arguments
request = args.request
if "num_tests" in args:
num_tests = int(args.num_tests)
else:
num_tests = None
# Test validity of input arguments
if request not in ["check", "create"]:
raise AssertionError()
if num_tests not in [i for i in np.arange(1001)]:
raise AssertionError(9)
return request, num_tests
| 3,440 |
def generate_sections(logdata: pd.DataFrame):
"""
Generates a list of SectionDescriptors based on iMotions packets
SlideStart and SlideEnd.
If the first Slide related packet is an End packet, the first
descriptor will include all timestamps up to that packet, else it
will drop the packets before.
The last descriptor will include all packets until end.
Assumes that there are SlideStart and SlideEnd packages in data.
"""
slide_start = logdata.Name == 'SlideStart'
slide_end = logdata.Name == 'SlideEnd'
slides = slide_start | slide_end
log_slides = logdata[slides]
time_diffs = log_slides.Timestamp.diff()
sections = []
if log_slides.iloc[0].Name == 'SlideStart':
# Bootstrap condition
sections.append(SectionDescriptor(shortcut.row_index(log_slides.head(1))))
for label, timediff in time_diffs.iteritems():
if not sections:
# If first packet is a SlideEnd, we include all data before
sections.append(SectionDescriptor(0, label, logdata.loc[label].Timestamp))
elif not sections[-1].end:
sections[-1].end = label
sections[-1].duration = timediff
else:
sections.append(SectionDescriptor(label))
last_row = logdata.tail(1).Timestamp
last_label = shortcut.row_index(last_row)
last_timestamp = last_row.values[0]
sections[-1].end = last_label
sections[-1].duration = logdata.loc[last_label].Timestamp - logdata.loc[sections[-1].start].Timestamp
return sections
| 3,441 |
def etree2dict(element):
"""Convert an element tree into a dict imitating how Yahoo Pipes does it.
"""
i = dict(element.items())
i.update(_make_content(i, element.text, strip=True))
for child in element:
tag = child.tag
value = etree2dict(child)
i.update(_make_content(i, value, tag))
if element.text and not set(i).difference(['content']):
# element is leaf node and doesn't have attributes
i = i.get('content')
return i
| 3,442 |
def write_input_file(input_file, molecule, parameters):
"""Write LAMMPS input file"""
write_every = 10000
num_timesteps = int(parameters['sim_length'] / parameters['ts'] * 1e6)
mol_ids, surface_ids = parameters['mol_ids'], parameters['surface_ids']
atom_names = sorted(list(set(molecule.atoms)))
with open(input_file, 'w') as f:
f.write('log log.nanocar append\n')
f.write('units real\n')
f.write('atom_style full\n')
f.write('boundary p p p\n')
f.write('pair_style lj/cut 12.500\n')
f.write('pair_modify tail yes mix arithmetic\n')
f.write('read_data data.nanocar\n\n')
f.write('group mol id %i:%i\n' % (mol_ids[0], mol_ids[1]))
f.write('group surf id %i:%i\n' % (surface_ids[0], surface_ids[1]))
f.write('compute C1 mol com\n')
f.write('variable seed equal 123456\n')
f.write('variable T equal %i\n'% parameters['T'])
f.write('thermo %i\n' % write_every)
f.write('thermo_style custom step temp press etotal epair emol c_C1[1] c_C1[2] c_C1[3]\n')
f.write('velocity mol create $T ${seed} dist uniform\n')
f.write('timestep %.1f\n' % parameters['ts'])
f.write('variable txyz equal %i\n' % write_every)
f.write('dump 1 mol custom ${txyz} traj.xyz id element xu yu zu\n')
f.write('dump_modify 1 element %s\n\n' % ' '.join(atom_names))
f.write('fix RIG mol rigid/nvt single temp $T $T 100\n')
f.write('run %i\n' % num_timesteps)
f.write('unfix RIG\n')
| 3,443 |
def full_chain():
"""
    GETting `/chain` returns the full blockchain.
Returns:
The node's full blockchain list, as a JSON response.
"""
logger.info("Received GET request for the full chain")
return {
"chain": blockchain.chain,
"length": len(blockchain.chain),
}
| 3,444 |
def check_files(in_file, out_file, args):
""" Check files exist/don't exist.
Parameters
----------
in_file : str:
the input file
out_file : str
the output file
args : parser args
any additional arguments from the parser
Raises
------
FileNotFound
in case any of the files isn't found.
"""
if not path.exists(in_file):
raise FileNotFound("input file '%s' does not exist!" % in_file)
if path.exists(out_file):
if not args.force:
raise FileNotFound("output file '%s' exists!" % out_file)
else:
log.verbose("overwriting existing file: '%s'" % out_file)
log.verbose("input file is: '%s'" % in_file)
log.verbose("output file is: '%s'" % out_file)
| 3,445 |
def _patched_is_incomplete_option(all_args, cmd_param):
"""Patched version of is_complete_option.
Fixes issue testing a cmd param against the current list of
args. Upstream version does not consider combined short form args
and so a command like `guild check -nt <auto>` doesn't work. The
patched version considers that `t` above is the current param
option.
"""
from click import _bashcomplete
if not isinstance(cmd_param, _bashcomplete.Option):
return False
if cmd_param.is_flag:
return False
last_option = None
for index, arg_str in enumerate(
reversed([arg for arg in all_args if arg != _bashcomplete.WORDBREAK])
):
if index + 1 > cmd_param.nargs:
break
if _bashcomplete.start_of_option(arg_str):
last_option = arg_str
if not last_option:
return False
if last_option[:2] == "--":
return last_option in cmd_param.opts
assert last_option[:1] == "-", last_option
for i in range(len(last_option), 0, -1):
if "-%s" % last_option[i:] in cmd_param.opts:
return True
return False
| 3,446 |
def confirm_install() -> bool:
"""
Confirms that update should be performed on an empty install
"""
message = (
"The pack you are trying to update doesn't have a pack-manifest.json file. "
"Unless you are doing a first install, *THIS SHOULD NOT HAPPEN*. If you are doing a first install, just click 'OK'\n\n"
"Your pack is currently broken from MPM point of view, but should still run."
"\nIf you proceed, the udpate process will duplicate mods and add conflicting overrides:"
" this *WILL BREAK* your pack for minecraft too. It is advised to cancel"
)
root = tk.Tk()
root.withdraw()
try:
return mbox.askokcancel(title="Confirm Update", message=message)
finally:
root.destroy()
| 3,447 |
def f_cv(x, dt):
""" state transition function for a
constant velocity aircraft"""
F = np.array([[1, dt, 0.5*dt*dt, 0, 0, 0],
[0, 1, dt, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, dt, 0.5*dt*dt],
[0, 0, 0, 0, 1, dt],
[0, 0, 0, 0, 0, 1]], dtype=float)
return np.dot(F, x)
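# Usage sketch for f_cv() above (assumes numpy imported as np): propagate a
# state ordered [x, vx, ax, y, vy, ay] over one 0.1 s step.
def _example_f_cv():
    """Sketch: with velocity (1, 2) and zero acceleration, position moves to (0.1, 0.2)."""
    x0 = np.array([0., 1., 0., 0., 2., 0.])
    return f_cv(x0, dt=0.1)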
| 3,448 |
def p_definition(p):
"""definition : PARSER_NAME COLON expr NL"""
p[0] = Definition(p[1], p[3], p[2], p[4])
| 3,449 |
async def generate(args: argparse.Namespace, core: Voice2JsonCore) -> None:
"""Generate randomish examples from intent graph."""
import networkx as nx
import rhasspynlu
# Make sure profile has been trained
assert core.check_trained(), "Not trained"
# Load settings
intent_graph_path = core.ppath(
"intent-recognition.intent-graph", "intent.pickle.gz"
)
# Load intent graph
_LOGGER.debug("Loading %s", intent_graph_path)
with gzip.GzipFile(intent_graph_path, mode="rb") as graph_gzip:
intent_graph = nx.readwrite.gpickle.read_gpickle(graph_gzip)
start_node, end_node = rhasspynlu.jsgf_graph.get_start_end_nodes(intent_graph)
assert (start_node is not None) and (
end_node is not None
), "Missing start/end node(s)"
paths_left = None
if args.number > 0:
paths_left = args.number
# Iterate through all paths
for path in itershuffle(dag_paths_random(intent_graph, start_node, end_node)):
if paths_left is not None:
paths_left -= 1
if paths_left < 0:
# Stop iterating
break
if args.raw_symbols:
# Output labels directly from intent graph
symbols = []
for from_node, to_node in rhasspynlu.utils.pairwise(path):
edge_data = intent_graph.edges[(from_node, to_node)]
olabel = edge_data.get("olabel")
if olabel:
symbols.append(olabel)
print(" ".join(symbols))
continue
# Convert to intent
_, recognition = rhasspynlu.fsticuffs.path_to_recognition(path, intent_graph)
if not recognition:
_LOGGER.warning("Recognition failed for path: %s", path)
continue
intent = dataclasses.asdict(recognition)
# Add slots
intent["slots"] = {}
for ev in intent["entities"]:
intent["slots"][ev["entity"]] = ev["value"]
if args.iob:
# IOB format
token_idx = 0
entity_start = {ev["start"]: ev for ev in intent["entities"]}
entity_end = {ev["end"]: ev for ev in intent["entities"]}
entity = None
word_tags = []
for word in intent["tokens"]:
# Determine tag label
tag = "O" if not entity else f"I-{entity}"
if token_idx in entity_start:
entity = entity_start[token_idx]["entity"]
tag = f"B-{entity}"
word_tags.append((word, tag))
# word ner
token_idx += len(word) + 1
if (token_idx - 1) in entity_end:
entity = None
print("BS", end=" ")
for wt in word_tags:
print(wt[0], end=" ")
print("ES", end="\t")
print("O", end=" ") # BS
for wt in word_tags:
print(wt[1], end=" ")
print("O", end="\t") # ES
# Intent name last
print(intent["intent"]["name"])
else:
# Write as jsonl
print_json(intent)
| 3,450 |
def prompt_for_password(args):
"""
if no password is specified on the command line, prompt for it
"""
if not args.password:
args.password = getpass.getpass(
prompt='Enter password for host %s and user %s: ' %
(args.host, args.user))
return args
| 3,451 |
def update_hparams(hparams, new_hparams):
""" Update existing with new hyperparameters """
if new_hparams is None:
return hparams
if isinstance(new_hparams, str) and new_hparams.endswith('.json'):
tf.logging.info("Overriding default hparams from JSON")
with open(new_hparams) as fh:
hparams.parse_json(fh.read())
elif isinstance(new_hparams, str):
tf.logging.info("Overriding default hparams from str:")
hparams.parse(new_hparams)
elif isinstance(new_hparams, dict):
tf.logging.info("Overriding default hparams from dict:")
for k, val in new_hparams.items():
if k in hparams:
tf.logging.info(" {} -> {}".format(k, val))
hparams.set_hparam(k, val)
elif isinstance(new_hparams, Namespace):
tf.logging.info("Overriding default hparams from Namespace:")
for k, val in vars(new_hparams).items():
if k in hparams and val is not None:
tf.logging.info(" {} -> {}".format(k, val))
hparams.set_hparam(k, val)
else:
raise ValueError(new_hparams)
return hparams
| 3,452 |
def relative_subpackage_import(path: str, package: str) -> Any:
"""[summary]
Args:
path (str): [description]
package (str): [description].
Returns:
Any: [description]
"""
if not path.startswith('.'):
path = '.' + path
return importlib.import_module(path, package = package)
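# Minimal usage sketch for relative_subpackage_import() above, using a
# standard-library package so it runs anywhere.
def _example_relative_subpackage_import():
    """Sketch: resolve collections.abc relative to the collections package."""
    module = relative_subpackage_import('abc', 'collections')
    return module.__name__  # 'collections.abc'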
| 3,453 |
def convertToNpArray(train,test):
"""
Converts the data into numpy arrays
:param train: training data csv path
:param test: test data csv path
:return: training data and labels, test data and labels
"""
train_data = pd.read_csv(train, delimiter=',', quotechar='"',
dtype=None, encoding="ISO-8859-1",
usecols=[0, 5])
train_array = create_train_data_subset(train_data)
np.random.shuffle(train_array)
train_target_array = train_array[:, 0]
train_target_array = np.reshape(train_target_array, (len(train_target_array), 1))
train_data_array = train_array[:, 1]
train_data_array = np.reshape(train_data_array, (len(train_data_array), 1))
test_data = pd.read_csv(test, delimiter=',', quotechar='"',
dtype=None, encoding="ISO-8859-1",
usecols=[0, 5], names=['label', 'tweet'])
test_data = test_data[test_data.label != 2]
test_data = test_data.values
test_data = np.append(test_data, create_test_data_subset(train_data), axis=0)
np.random.shuffle(test_data)
test_target = test_data[:, 0]
test_target_array = np.array(test_target)
test_target_array = np.reshape(test_target_array, (len(test_target_array), 1))
test_data = test_data[:, 1]
test_data_array = np.reshape(test_data, (len(test_data), 1))
return train_data_array,test_data_array,train_target_array,test_target_array
| 3,454 |
def update_layers(**kwargs):
"""
Update all the layers when base has been updated.
"""
pecha_id = get_pecha_id(kwargs["pecha_number"])
src_pecha_path = download_pecha(pecha_id)
click.echo(INFO.format(f"Updating base of {pecha_id} ..."))
src_opf_path = src_pecha_path / f"{pecha_id}.opf"
dst_opf_path = Path(kwargs["pecha_path"]) / f"{pecha_id}.opf"
pecha = PechaBaseUpdate(src_opf_path, dst_opf_path)
pecha.update()
| 3,455 |
def attachment_to_multidim_measurement(attachment, name=None):
"""Convert an OpenHTF test record attachment to a multi-dim measurement.
  This is a best-effort attempt to reverse that conversion, as some data is lost
  when converting from a multi-dim measurement to an attachment.
Args:
attachment: an `openhtf.test_record.Attachment` from a multi-dim.
name: an optional name for the measurement. If not provided will use the
name included in the attachment.
Returns:
An multi-dim `openhtf.Measurement`.
"""
data = json.loads(attachment.data)
name = name or data.get('name')
  # attachment_dims is a list of dicts with keys 'uom_suffix' and 'uom_code'
attachment_dims = data.get('dimensions', [])
# attachment_value is a list of lists [[t1, x1, y1, f1], [t2, x2, y2, f2]]
attachment_values = data.get('value')
attachment_outcome_str = data.get('outcome')
if attachment_outcome_str not in TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME:
    # For backward compatibility with saved data we'll convert integers to str
try:
attachment_outcome_str = test_runs_pb2.Status.Name(
int(attachment_outcome_str))
except ValueError:
attachment_outcome_str = None
# Convert test status outcome str to measurement outcome
outcome = TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME.get(
attachment_outcome_str)
# convert dimensions into htf.Dimensions
_lazy_load_units_by_code()
dims = []
for d in attachment_dims:
# Try to convert into htf.Dimension including backwards compatibility.
unit = UNITS_BY_CODE.get(d.get('uom_code'), units.NONE)
description = d.get('name', '')
dims.append(measurements.Dimension(description=description, unit=unit))
# Attempt to determine if units are included.
if attachment_values and len(dims) == len(attachment_values[0]):
# units provided
units_ = dims[-1].unit
dimensions = dims[:-1]
else:
units_ = None
dimensions = dims
# created dimensioned_measured_value and populate with values.
measured_value = measurements.DimensionedMeasuredValue(
name=name,
num_dimensions=len(dimensions)
)
for row in attachment_values:
coordinates = tuple(row[:-1])
val = row[-1]
measured_value[coordinates] = val
measurement = measurements.Measurement(
name=name,
units=units_,
dimensions=tuple(dimensions),
measured_value=measured_value,
outcome=outcome
)
return measurement
| 3,456 |
def save_xyz_file(fname, R, Z, comment=""):
"""Units for R are expected to be Bohr and will be translated to Angstrom in output"""
assert len(R) == len(Z)
PERIODIC_TABLE = 'H He Li Be B C N O F Ne'.split()
ANGSTROM_IN_BOHR = 1.88973
with open(fname, 'w') as f:
f.write(str(len(R)) + '\n')
f.write(comment + '\n')
for Z_, R_ in zip(Z, R):
f.write(f"{PERIODIC_TABLE[Z_-1]:3>} {R_[0]/ANGSTROM_IN_BOHR:-.10f} {R_[1]/ANGSTROM_IN_BOHR:-.10f} {R_[2]/ANGSTROM_IN_BOHR:-.10f}\n")
| 3,457 |
def similarity_iou_2d(pred_boxes, true_boxes):
"""
Return intersection-over-union (Jaccard index) of boxes.
Both sets of boxes are expected to be in (cx, cy, w, h) format.
Arguments:
pred_boxes (Tensor[B, 4, N])
true_boxes (Tensor[B, 4, M])
Returns:
        iou (Tensor[B, N, M]): for each batch element, the NxM matrix of pairwise IoU values between pred_boxes and true_boxes
"""
def area(boxes):
return (boxes[:, :, 2] - boxes[:, :, 0]) * (boxes[:, :, 3] - boxes[:, :, 1])
pred_boxes = convert_to_corners(pred_boxes).transpose(1, 2) # BN4
true_boxes = convert_to_corners(true_boxes).transpose(1, 2) # BN4
area1 = area(pred_boxes) # BN
area2 = area(true_boxes) # BM
lt = torch.max(pred_boxes[:,:, None, :2], true_boxes[:,:, :2]) # BNM2
rb = torch.min(pred_boxes[:,:, None, 2:], true_boxes[:,:, 2:]) # BNM2
wh = (rb - lt).clamp(min=0) # BNM2
inter = wh[:, :, :, 0] * wh[:, :, :, 1] # BNM
iou = inter / (area1[:, :, None] + area2 - inter) # BNM
return iou
| 3,458 |
def get_atom_coords_by_names(residue, atom_names):
"""Given a ProDy Residue and a list of atom names, this attempts to select and return
all the atoms.
If atoms are not present, it substitutes the pad character in lieu of their
coordinates.
"""
coords = []
pad_coord = np.asarray([GLOBAL_PAD_CHAR] * 3)
for an in atom_names:
a = residue.select(f"name {an}")
if a:
coords.append(a.getCoords()[0])
else:
coords.append(pad_coord)
return coords
| 3,459 |
def test_record_get_dynamic_url_function():
"""Record get_dynamic_url function sends properly formated get request."""
response = record.get_dynamic_url('example.com', record_id=1234)
assert response.success
payload = response.payload
assert payload['url'] == 'https://api.cloudns.net/dns/get-dynamic-url.json'
assert payload['params']['domain-name'] == 'example.com'
assert payload['params']['record-id'] == 1234
| 3,460 |
def calculate_chord(radius, arc_degrees):
"""
Please see the wikipedia link for more information on how this works.
https://en.wikipedia.org/wiki/Chord_(geometry)
"""
# Calculate the arc_degrees in radians.
# We need this because sin() expects it.
arc_radians = radians(arc_degrees)
# Calculate the chord.
return radius * (2 * sin(arc_radians / 2))
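# Worked example for calculate_chord() above (assumes `from math import radians, sin`,
# which the function itself relies on): a 60-degree arc of a unit circle has a
# chord exactly equal to the radius.
def _example_calculate_chord():
    """Sketch: chord of a 60-degree arc on a unit circle is 1.0."""
    return calculate_chord(radius=1.0, arc_degrees=60)  # ~1.0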
| 3,461 |
def broadcast(right, left, left_fk=None, right_pk=None, keep_right_index=False):
"""
Re-indexes a series or data frame (right) to align with
another (left) series or data frame via foreign key relationship.
The index or keys on the right must be unique (i.e. this only supports
    1:1 or 1:m relationships between the right and left).
Parameters:
-----------
right: pandas.DataFrame or pandas.Series
Columns or set of columns to re-project(broadcast) from.
left: pandas.Series, pandas.Index or pandas.DataFrame
Object to align to.
        if pandas.Series:
Series values are used as the foreign keys.
if pandas.Index:
The index will be used as the foreign keys.
if pandas.DataFrame
Use the 'left_fk` argument to specify one
or more columns to serve as the foreign keys.
left_fk: str or list of str
Only applicable if 'left' is a dataframe.
Column or list of columns to serve as foreign keys.
If not provided the `left's` index will be used.
right_pk: str or list of str, default None
Column or list of columns that uniquely define each row
in the the `right`. If not provided, the `right's` index will be
used.
keep_right_index: bool, optional, default False
If True, and the `right` is a data frame, and a `right_pk` arg is provided,
then column(s) containing the `right's` index values will be
appended to the result.
Returns:
--------
pandas.Series or pandas.DataFrame with column(s) from
right aligned with the left.
"""
update_index = True
# if we're broadcasting using a data frame , we need to know which column(s)
if isinstance(left, pd.DataFrame) and left_fk is None:
raise ValueError(
'If the left is a DataFrame, must supply the left_fk (column name to join on)')
# if right primary keys are explicitly provided
if right_pk:
if keep_right_index:
right = right.reset_index()
right.set_index(right_pk, inplace=True)
else:
right = right.set_index(right_pk)
# ensure that we can align correctly
if not right.index.is_unique:
raise ValueError("The right's index must be unique!")
# decide how to broadcast based on the type of left provided
if isinstance(left, pd.Index):
update_index = False
# for cases where a left_fk is provided as a list with a single element
if left_fk:
if isinstance(left_fk, list):
if len(left_fk) == 1:
left_fk = left_fk[0]
if isinstance(left, pd.DataFrame):
if left_fk:
left = left[left_fk]
else:
left = left.index
update_index = False
# reindex
a = right.reindex(left)
# update the index if necessary
if update_index:
a.index = left.index.copy()
return a
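# Hedged usage sketch for broadcast() above (assumes pandas imported as pd):
# re-project a per-household attribute onto a persons table via a foreign key.
# The table and column names are illustrative only.
def _example_broadcast():
    """Sketch: align household income with each person row."""
    households = pd.DataFrame({'income': [50, 75]}, index=[1, 2])
    persons = pd.DataFrame({'household_id': [1, 1, 2]})
    persons['income'] = broadcast(households['income'], persons,
                                  left_fk='household_id')
    return persons  # rows get incomes 50, 50, 75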
| 3,462 |
def _is_arraylike(arr):
"""Check if object is an array."""
return (
hasattr(arr, "shape")
and hasattr(arr, "dtype")
and hasattr(arr, "__array__")
and hasattr(arr, "ndim")
)
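# Quick sketch of the duck-typing check above (assumes numpy is available):
# ndarrays expose shape/dtype/__array__/ndim, plain lists do not.
def _example_is_arraylike():
    """Sketch: returns (True, False)."""
    import numpy as np
    return _is_arraylike(np.zeros(3)), _is_arraylike([0.0, 0.0, 0.0])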
| 3,463 |
def exportCandidatePairs(candidatePairs, output_file, log, numReads=None):
"""
Export candidate pairs to a file.
The type of file is determined on the provided filename for output_file.
Supported filetypes: txt, json, pickle (python) and csv.
"""
tim = time.clock()
# Output file extension
ext = output_file.rsplit(".", 1)
# default to txt if no extension provided
if len(ext) == 0:
ext = "txt"
output_file += ".txt"
else:
ext = ext[1]
# save set information - However, more space consuming
# and not needed. Hence, this should never be used.
if ext == "set_pickle":
with open(output_file, "w") as f:
pickle.dump(candidatePairs, f)
elif ext == "json":
with open(output_file, "w") as f:
if isinstance(candidatePairs[0], set):
for id1 in candidatePairs:
candidatePairs[id1] = list(candidatePairs[id1])
json.dump(candidatePairs, f)
elif ext == "pickle":
with open(output_file, "w") as f:
if isinstance(candidatePairs[0], set):
for id1 in candidatePairs:
candidatePairs[id1] = list(candidatePairs[id1])
pickle.dump(candidatePairs, f)
elif ext == "txt":
with open(output_file, "w") as f:
for id1 in candidatePairs:
f.write(str(id1)+"\t")
#sortedElements = sorted(list(candidatePairs[id1]))
sortedElements = list(candidatePairs[id1])
for id2 in sortedElements[:-1]:
f.write(str(id2)+",")
if len(sortedElements) > 0:
f.write(str(sortedElements[-1])+"\n")
# Test-only write the first numReads reads to output file.
elif ext == "temp":
with open(output_file, "w") as f:
for id1 in xrange(numReads):
f.write(str(id1)+"\t")
#sortedElements = sorted(list(candidatePairs[id1]))
#print sortedElements
if id1 in candidatePairs:
sortedElements = list(candidatePairs[id1])
for id2 in sortedElements[:-1]:
f.write(str(id2)+",")
if len(sortedElements) > 0:
f.write(str(sortedElements[-1]))
f.write("\n")
elif ext == "txt2":
with open(output_file, "w") as f:
for id1 in candidatePairs:
for id2 in candidatePairs[id1]:
f.write(str(id1)+"\t"+str(id2)+"\n")
elif ext == "csv":
        w = csv.writer(open(output_file, "w"))
for key, val in candidatePairs.items():
w.writerow([key, val])
# Else export to whatever filename that is provided in the format
# used for txt files.
else:
output_file += ".txt"
with open(output_file, "w") as f:
for id1 in candidatePairs:
f.write(str(id1)+"\t")
sortedElements = list(candidatePairs[id1])
for id2 in sortedElements[:-1]:
f.write(str(id2)+",")
f.write(str(sortedElements[-1])+"\n")
logprint(log, False, "Exported candidate pairs to", output_file,
"in", time.clock()-tim, "seconds")
| 3,464 |
def test_deep_agg_feat_chain(es):
"""
Agg feat of agg feat:
region.Mean(customer.Count(Log))
"""
customer_count_feat = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
region_avg_feat = ft.Feature(customer_count_feat, parent_entity=es[u'régions'], primitive=Mean)
feature_set = FeatureSet([region_avg_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array(['United States']))
v = df[region_avg_feat.get_name()][0]
assert (v == 17 / 3.)
| 3,465 |
def tokenize_docstring(text):
"""Tokenize docstrings.
Args:
text: A docstring to be tokenized.
Returns:
A list of strings representing the tokens in the docstring.
"""
en = spacy.load('en')
tokens = en.tokenizer(text.decode('utf8'))
return [token.text.lower() for token in tokens if not token.is_space]
| 3,466 |
def conv_output_length(input_length, filter_size,
border_mode, stride, dilation=1):
"""Determines output length of a convolution given input length.
# Arguments
input_length: integer.
filter_size: integer.
border_mode: one of "same", "valid", "full".
stride: integer.
dilation: dilation rate, integer.
# Returns
The output length (integer).
"""
if input_length is None:
return None
assert border_mode in {'same', 'valid', 'full'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if border_mode == 'same':
output_length = input_length
elif border_mode == 'valid':
output_length = input_length - dilated_filter_size + 1
elif border_mode == 'full':
output_length = input_length + dilated_filter_size - 1
return (output_length + stride - 1) // stride
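# Worked example for conv_output_length() above: a length-10 input with a
# 3-wide filter at stride 1 under each border mode.
def _example_conv_output_length():
    """Sketch: 'same' keeps 10, 'valid' trims to 8, 'full' pads to 12."""
    return (conv_output_length(10, 3, 'same', 1),
            conv_output_length(10, 3, 'valid', 1),
            conv_output_length(10, 3, 'full', 1))  # (10, 8, 12)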
| 3,467 |
def source_ccp4():
"""Function to return bash command to source CCP4"""
if os.name == "nt":
return
return "source {}".format(os.path.join(os.environ["CCP4"], "bin", "ccp4.setup-sh"))
| 3,468 |
def dblHour():
"""(read-only) Array of doubles containgin time value in hours for time-sampled monitor values; Empty if frequency-sampled values for harmonics solution (see dblFreq)"""
return get_float64_array(lib.Monitors_Get_dblHour)
| 3,469 |
def select_uuid_like_indexes_on_table(model, cursor):
"""
Gets a list of database index names for the given model for the
uuid-containing fields that have had a like-index created on them.
:param model: Django model
:param cursor: database connection cursor
:return: list of database rows; the first field of each row is an index
name
"""
# VersionedForeignKey fields as well as the id fields have these useless
# like indexes
field_names = ["'%s'" % f.column for f in model._meta.fields if
isinstance(f, VersionedForeignKey)]
field_names.append("'id'")
sql = """
select i.relname as index_name
from pg_class t,
pg_class i,
pg_index ix,
pg_attribute a
where t.oid = ix.indrelid
and i.oid = ix.indexrelid
and a.attrelid = t.oid
and a.attnum = ANY(ix.indkey)
and t.relkind = 'r'
and t.relname = '{0}'
and a.attname in ({1})
and i.relname like '%_like'
""".format(model._meta.db_table, ','.join(field_names))
cursor.execute(sql)
return cursor.fetchall()
| 3,470 |
def retrieve_jambalaya(request):
"""
Retrieve a jambalaya recipe by name or country of origin
---
serializer: JambalayaSerializer
parameters:
- name: name
description: name as found in recipe
type: string
paramType: query
required: false
- name: origin
type: string
paramType: query
required: false
"""
if request.method == 'GET':
serializer = JambalayaQuerySerializer(data=request.DATA)
if serializer.data['name'] is not None:
j = Jambalaya.objects.filter(recipe__contains='name=%s' % serializer.data['name'])
else:
j = Jambalaya.objects.filter(recipe__contains="country=%s" % serializer.data['origin'])
serializer = JambalayaSerializer(j, many=True)
return Response(serializer.data)
else:
return Response("", status=status.HTTP_400_BAD_REQUEST)
| 3,471 |
def get_pymatgen(optimade_structure: OptimadeStructure) -> Union[Structure, Molecule]:
"""Get pymatgen `Structure` or `Molecule` from OPTIMADE structure.
This function will return either a pymatgen `Structure` or `Molecule` based
on the periodicity or periodic dimensionality of OPTIMADE structure.
For bulk, three-dimensional structures, a pymatgen `Structure` is returned.
This means, if the [`dimension_types`][optimade.models.structures.StructureResourceAttributes.dimension_types]
attribute is comprised of all `1`s (or [`Periodicity.PERIODIC`][optimade.models.structures.Periodicity.PERIODIC]s).
Otherwise, a pymatgen `Molecule` is returned.
Parameters:
optimade_structure: OPTIMADE structure.
Returns:
A pymatgen `Structure` or `Molecule` based on the periodicity of the
OPTIMADE structure.
"""
if "optimade.adapters" in repr(globals().get("Structure")):
warn(PYMATGEN_NOT_FOUND, AdapterPackageNotFound)
return None
if all(optimade_structure.attributes.dimension_types):
return _get_structure(optimade_structure)
return _get_molecule(optimade_structure)
| 3,472 |
def get_meshgrid_samples(lower, upper, mesh_size: tuple, dtype) ->\
torch.Tensor:
"""
Often we want to get the mesh samples in a box lower <= x <= upper.
This returns a torch tensor of size (prod(mesh_size), sample_dim), where
each row is a sample in the meshgrid.
"""
sample_dim = len(mesh_size)
assert (len(upper) == sample_dim)
assert (len(lower) == sample_dim)
assert (len(mesh_size) == sample_dim)
meshes = []
for i in range(sample_dim):
meshes.append(
torch.linspace(lower[i], upper[i], mesh_size[i], dtype=dtype))
mesh_tensors = torch.meshgrid(*meshes)
return torch.cat(
[mesh_tensors[i].reshape((-1, 1)) for i in range(sample_dim)], dim=1)
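# Usage sketch for get_meshgrid_samples() above (assumes torch is imported):
# sample a 3x2 grid over the box [0, 1] x [0, 2].
def _example_get_meshgrid_samples():
    """Sketch: returns a (6, 2) tensor, one row per grid point."""
    return get_meshgrid_samples(lower=[0., 0.], upper=[1., 2.],
                                mesh_size=(3, 2), dtype=torch.float64)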
| 3,473 |
def draw_nodes(start, nodes_list, cores, minute_scale, space_between_minutes,
colors):
"""
Function to return the html-string of the node drawings for the
gantt chart
Parameters
----------
start : datetime.datetime obj
start time for first node
nodes_list : list
a list of the node dictionaries
cores : integer
the number of cores given to the workflow via the 'n_procs'
plugin arg
minute_scale : integer
the scale, in minutes, at which to plot line markers for the
gantt chart; for example, minute_scale=10 means there are lines
drawn at every 10 minute interval from start to finish
space_between_minutes : integer
scale factor in pixel spacing between minute line markers
colors : list
a list of colors to choose from when coloring the nodes in the
gantt chart
Returns
-------
    result : string
        the html-formatted string of the node drawings for the
        gantt chart
"""
# Init variables
result = ""
scale = space_between_minutes / minute_scale
space_between_minutes = space_between_minutes / scale
end_times = [
datetime.datetime(
start.year, start.month, start.day, start.hour, start.minute,
start.second
)
for core in range(cores)
]
# For each node in the pipeline
for node in nodes_list:
# Get start and finish times
node_start = node["start"]
node_finish = node["finish"]
# Calculate an offset and scale duration
offset = (
(node_start - start).total_seconds() / 60
) * scale * space_between_minutes + 220
# Scale duration
scale_duration = (node["duration"] / 60) * scale \
* space_between_minutes
if scale_duration < 5:
scale_duration = 5
scale_duration -= 2
# Left
left = 60
for core in range(len(end_times)):
if end_times[core] < node_start:
left += core * 30
end_times[core] = datetime.datetime(
node_finish.year,
node_finish.month,
node_finish.day,
node_finish.hour,
node_finish.minute,
node_finish.second,
)
break
# Get color for node object
color = random.choice(colors)
if "error" in node:
color = "red"
# Setup dictionary for node html string insertion
node_dict = {
"left": left,
"offset": offset,
"scale_duration": scale_duration,
"color": color,
"node_name": node.get("name", node.get("id", "")),
"node_dur": node["duration"] / 60.0,
"node_start": node_start.strftime("%Y-%m-%d %H:%M:%S"),
"node_finish": node_finish.strftime("%Y-%m-%d %H:%M:%S"),
}
# Create new node string
new_node = (
"<div class='node' style='left:%(left)spx;top:%(offset)spx;"
"height:%(scale_duration)spx;background-color:%(color)s;'"
"title='%(node_name)s\nduration:%(node_dur)s\n"
"start:%(node_start)s\nend:%(node_finish)s'></div>" % node_dict
)
# Append to output result
result += new_node
# Return html string for nodes
return result
| 3,474 |
def has_understood_request(
sys_nlu: dict, slot: str, domain: str, lowercase_slots: bool = True
) -> bool:
"""Check if the system has understood a user request in a particular domain."""
# assume perfect system if NLU not available
if not sys_nlu:
return True
sys_nlu_requested = get_turn_action_params(
sys_nlu,
act_patterns=metadata.REQUEST_ACT_PATTERNS,
service_patterns=[domain],
include_values=False,
use_lowercase=lowercase_slots,
)[
domain
] # type: list[str]
assert all("-" not in slt for slt in sys_nlu_requested)
sys_nlu_requested = [f"{domain}-{slt}" for slt in sys_nlu_requested]
return slot in sys_nlu_requested
| 3,475 |
def lengthOfLongestSubstring(s):
"""
:type s: str
:rtype: int
"""
res = ""
n = 0
for i in s:
if i not in res:
res = res + i
else:
indexofi = res.find(i)
res = res[indexofi+1::] + i
k = len(res)
if k > n:
n = k
print(res)
return n
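# Quick check of the sliding-window logic above.
def _example_length_of_longest_substring():
    """Sketch: "abcabcbb" -> 3 (the substring "abc")."""
    return lengthOfLongestSubstring("abcabcbb")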
| 3,476 |
async def get_sequence_metadata(checksum: str, accept: str = ""):
"""Return Refget sequence metadata based on checksum value."""
headers = Headers()
url_path = "sequence/" + checksum + "/metadata"
try:
result = await create_request_coroutine(
url_list=metadata_url_list(checksum),
url_path=url_path,
headers=headers,
params={accept: accept},
)
if result == "":
return HTTPException(status_code=HTTP_404_NOT_FOUND, detail="Not Found")
return result
except Exception as e:
logger.log("DEBUG", "Unhandled exception in get_sequence_metadata: " + str(e))
| 3,477 |
def getattrs(o, *attrs, **kwargs):
"""
>>> getattrs((), '__iter__', '__name__', 'strip')('_')
'iter'
>>> getattrs((), 'foo', 'bar', default=0)
0
"""
if 'default' in kwargs:
default = kwargs['default']
c = o
for attr in attrs:
try:
c = getattr(c, attr)
except AttributeError:
return default
return c
else:
return reduce(getattr, attrs, o)
| 3,478 |
def maria_create_account(params):
"""root user and dbuser are created at startup.
grant all to dbuser is all we need to do after the DB starts
:type params: dict
"""
error_msg = 'ERROR: mariadb_util; maria_create_account; '
error_msg += 'action: %s user: %s error: %s'
password = Config.accounts[params['dbtype']]['admin_pass']
iport = int(params['port'])
try:
conn = pymysql.connect(host=Config.container_host, port=iport,
user='root',
password=password)
except pymysql.err.OperationalError as e:
print("ERROR: maria_create_account, connect: %s" % e)
return "connect error"
cur = conn.cursor()
sql_cmd = "GRANT ALL PRIVILEGES ON *.* TO '%s'@'%%' " % params['dbuser']
sql_cmd += "WITH GRANT OPTION"
try:
cur.execute(sql_cmd)
except pymysql.err.InternalError as e:
print(error_msg % ('grant', params['dbuser'], e))
conn.commit()
cur.close()
conn.close()
return 'ok'
| 3,479 |
def compute_coef_xz(y_val, coef_3d):
"""
    compute the 2D polynomial coefficients for a given y
    :param y_val: value of y
    :param coef_3d: the original 3D polynomial coefficients
    :return: 2D array of coefficients in x and z
"""
coef_xz = np.zeros((coef_3d.shape[1], coef_3d.shape[2]), dtype=coef_3d.dtype)
max_degree_y = coef_3d.shape[0] - 1
for y_power in range(max_degree_y + 1):
coef_xz += coef_3d[y_power, :, :] * y_val ** (max_degree_y - y_power)
return coef_xz
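# Usage sketch for compute_coef_xz() above (assumes numpy imported as np):
# with max_degree_y = 1 the result is coef_3d[0] * y + coef_3d[1].
def _example_compute_coef_xz():
    """Sketch: collapse a (2, 2, 2) coefficient array at y = 2."""
    coef_3d = np.arange(8, dtype=float).reshape(2, 2, 2)
    return compute_coef_xz(2.0, coef_3d)  # equals 2 * coef_3d[0] + coef_3d[1]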
| 3,480 |
def bbox_overlaps(bboxes1, bboxes2, mode='iou'):
"""Calculate the ious between each bbox of bboxes1 and bboxes2.
Args:
bboxes1(ndarray): shape (n, 4)
bboxes2(ndarray): shape (k, 4)
mode(str): iou (intersection over union) or iof (intersection
over foreground)
Returns:
ious(ndarray): shape (n, k)
"""
assert mode in ['iou', 'iof']
bboxes1 = bboxes1.astype(np.float32)
bboxes2 = bboxes2.astype(np.float32)
rows = bboxes1.shape[0]
cols = bboxes2.shape[0]
ious = np.zeros((rows, cols), dtype=np.float32)
if rows * cols == 0:
return ious
exchange = False
if bboxes1.shape[0] > bboxes2.shape[0]:
bboxes1, bboxes2 = bboxes2, bboxes1
ious = np.zeros((cols, rows), dtype=np.float32)
exchange = True
area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (bboxes1[:, 3] - bboxes1[:, 1] + 1)
area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (bboxes2[:, 3] - bboxes2[:, 1] + 1)
for i in range(bboxes1.shape[0]):
x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])
y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])
x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])
y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])
overlap = np.maximum(x_end - x_start + 1, 0) * np.maximum(
y_end - y_start + 1, 0)
if mode == 'iou':
union = area1[i] + area2 - overlap
else:
union = area1[i] if not exchange else area2
ious[i, :] = overlap / union
if exchange:
ious = ious.T
return ious
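# Worked example for bbox_overlaps() above (assumes numpy imported as np),
# using the +1 pixel-inclusive convention from the function.
def _example_bbox_overlaps():
    """Sketch: two 10x10 boxes offset by 5 overlap in a 5x5 patch -> IoU = 25/175."""
    boxes_a = np.array([[0, 0, 9, 9]], dtype=np.float32)
    boxes_b = np.array([[5, 5, 14, 14]], dtype=np.float32)
    return bbox_overlaps(boxes_a, boxes_b, mode='iou')  # ~0.1429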
| 3,481 |
def me_length_filter(me_iv_pairs, min_length=100):
"""Returns list of (InsertionVertices, InsertionVertices) tuples
with those containing paths going backwards through the ME sequence
filtered out
"""
filtered = []
for iv_pair in me_iv_pairs:
enter_iv, exit_iv = iv_pair
me_seq_len = exit_iv.exit_ref.pos - enter_iv.enter_ref.pos
if me_seq_len > min_length:
filtered.append(iv_pair)
return filtered
| 3,482 |
def main_cli(ctx, args=None, init=False, tables=False, refresh=False, prompt=False):
"""Helper to print GBQ schema information. Pass '.' to show all fields.
$ dimschema <OPT:table-name> <field-pattern>
"""
click.secho("Dimensions GBQ schema-helper (" + VERSION + ")", dim=True)
config = ConfigManager()
if init:
config.init_config_folder(force=True)
return
config.init_config_folder()
gbq = BigQueryManager(config)
if tables:
print(tabulate(gbq.tables_list(), showindex="false", headers="keys", tablefmt='simple'))
return
if refresh:
click.secho("Refreshing GBQ..", fg="red")
df = gbq.get_fields_list_from_cache(refresh_cache=True)
return
if prompt:
click.secho("Enter a SQL query (experimental). Enter an empty line to run. Syntax sugar: 'dim.'='dimensions-ai.data_analytics.'", fg="red")
sentinel = '' # ends when this string is seen
contents = []
for line in iter(input, sentinel):
contents.append(line)
sql = " ".join(contents)
click.secho("... running GBQ query", dim=True)
df = gbq.any_query(sql)
# disable_numparse=True => avoid scientific notation in large numbers
print(tabulate(df, showindex="false", headers="keys", tablefmt='grid', disable_numparse=True))
return
if args:
if len(args) > 1:
# first arg a table name
table, searchterm = args[0], args[1]
else:
searchterm, table = args[0], None
if searchterm == ".":
searchterm = None
if table and table not in VALID_TABLE_NAMES:
click.secho("Table name not valid. Use --tables to verify.", fg="red")
return
df = gbq.get_fields_list_from_cache(table=table, pattern=searchterm)
print(tabulate(df, showindex="false", headers="keys", tablefmt='simple'))
print("=====")
print("Total fields: ", len(df))
return
click.echo(ctx.get_help())
return
| 3,483 |
def test_updated_large_investor_profile_synced(es_with_signals):
"""Test that when an large investor profile is updated it is synced to ES."""
large_investor_profile = LargeCapitalInvestorProfileFactory()
large_investor_profile.investable_capital = 12345
large_investor_profile.save()
es_with_signals.indices.refresh()
| 3,484 |
def build_big_map_schema(data, schema: Schema) -> BigMapSchema:
""" Generate Big_map schema from the contract storage
:param data: Raw storage (Micheline expression)
:param schema: Storage schema
:returns: Mappings: Big_map id to JSON path and vice versa
:rtype: BigMapSchema
"""
bin_to_id = dict()
id_to_bin = dict()
def scan_big_map_ids(node, path):
if len(path) == 0:
assert node.get('int'), (node, path)
yield int(node['int'])
elif isinstance(node, list):
for item in node:
yield from scan_big_map_ids(item, path)
else:
assert node.get('args'), (node, path)
yield from scan_big_map_ids(node['args'][int(path[0])], path[1:])
for bin_path, prim in schema.bin_types.items():
if prim == 'big_map':
for big_map_id in scan_big_map_ids(data, bin_path[1:]):
bin_to_id[bin_path], id_to_bin[big_map_id] = big_map_id, bin_path
return BigMapSchema(bin_to_id, id_to_bin)
| 3,485 |
def test_initial_tokens():
"""Checks the procedure for finding the initial tokens"""
csnet = example.control.cs_network()
initial_step = Fraction(1, 5)
slaves, connections = csnet
step_sizes: cs.StepSizes = {
name: (idx + 5) * initial_step for idx, name in enumerate(slaves.keys())
}
make_zoh: cs.ConverterConstructor = cs.Zoh
rate_converters = {cs.Connection(src, dst): make_zoh for dst, src in connections.items()}
initial_tokens = find_initial_tokens(csnet, step_sizes, rate_converters)
rpv = cs.repetition_vector(connections, step_sizes)
for (name, _), buffer in initial_tokens.items():
if name in slaves:
assert rpv[name] == len(buffer)
| 3,486 |
def _get(session, urlTail):
# type: (Session, str) -> Dict
"""Make an HTTP(s) GET request to Batfish coordinator.
:raises SSLError if SSL connection failed
:raises ConnectionError if the coordinator is not available
"""
headers = {CoordConsts.HTTP_HEADER_BATFISH_APIKEY: session.apiKey,
CoordConsts.HTTP_HEADER_BATFISH_VERSION: pybatfish.__version__}
url = session.get_base_url2() + urlTail
response = requests.get(url, headers=headers, verify=session.verifySslCerts)
response.raise_for_status()
return dict(response.json())
| 3,487 |
def compose(fns):
"""Creates a function composition."""
def composition(*args, fns_):
res = fns_[0](*args)
for f in fns_[1:]:
res = f(*res)
return res
return functools.partial(composition, fns_=fns)
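# Usage sketch for compose() above (assumes functools is imported, as the
# function itself requires): because each intermediate result is unpacked
# with *, every function except the last should return an iterable of
# positional arguments for the next one.
def _example_compose():
    """Sketch: divmod(42, 10) -> (4, 2), then 4 + 2 -> 6."""
    def split_pair(x):
        return divmod(x, 10)
    def add(a, b):
        return a + b
    pipeline = compose([split_pair, add])
    return pipeline(42)  # 6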
| 3,488 |
def load_participants_file():
"""
Load participants.tsv file and build pandas DF of participants
This function assumes that the file participants.tsv is present in the -path-results
:return: participants: pandas dataframe
"""
participants = pd.read_csv(os.path.join('participants.tsv'), sep="\t")
return participants
| 3,489 |
def test_word3():
"""runs test3"""
out1 = getoutput(open + ' foobarbaz')
assert out1.rstrip() == 'ffofoofoobfoobafoobarfoobarbfoobarbafoobarbaz'
| 3,490 |
def clr_tilcmt(*args):
"""
clr_tilcmt(ea)
"""
return _ida_nalt.clr_tilcmt(*args)
| 3,491 |
def canarize_service(args, input_yaml, labels={}):
"""
Create a canary for an existing Service.
We do this by:
- adding a '-canary' suffix to the name of the Service
- adding a '-canary' suffix to all the labels in the Service selector
"""
res = []
# append the -canary to the Service name
output_yaml = copy.deepcopy(input_yaml)
canary_service_name = input_yaml["metadata"]["name"] + args.suffix
output_yaml["metadata"]["name"] = canary_service_name
print(f"# Creating canary Service {canary_service_name}")
# append the -canary to all the labels in the selector
for (k, v) in input_yaml["spec"]["selector"].items():
output_yaml["spec"]["selector"][k] = v + args.suffix
if args.namespace:
output_yaml["metadata"]["namespace"] = args.namespace
res += [output_yaml]
if args.gen_mapping:
canary_service_name = output_yaml["metadata"]["name"]
print(
f"# Creating Mapping for Service {canary_service_name} (weight: {args.canary_weight})")
res += [gen_mapping(args, canary_service_name,
weight=args.canary_weight, labels=labels)]
if len(labels) > 0:
if len(output_yaml["metadata"]["labels"]) > 0:
output_yaml["metadata"]["labels"].update(labels)
else:
output_yaml["metadata"]["labels"] = labels
return res
| 3,492 |
def get_query_results(query):
"""
    Fetch leads with common fields from the Close API using the provided search query.
    :param query: any Close search query, e.g. 'lead_status:Potential has:emails'
    :return: 2D array with a header row followed by one row per lead
"""
api = Client(CLOSE_API_KEY)
leads = api.get('lead', params={'query': query})
values = [[
'id',
'display_name',
'lead_name',
'description',
'url',
'status_id',
'status_label',
'primary_contact_name',
'primary_contact_first_name',
'primary_contact_last_name',
'primary_contact_title',
'primary_contact_primary_phone',
'primary_contact_primary_phone_type',
'primary_contact_other_phones',
'primary_contact_primary_email',
'primary_contact_primary_email_type',
'primary_contact_other_emails',
'primary_contact_primary_url',
'primary_contact_other_urls',
'created_by',
'created_by_name',
'updated_by',
'updated_by_name',
'date_created',
'date_updated',
'html_url'
]]
for lead in leads['data']:
primary_contact = lead['contacts'][0] if lead['contacts'] else None
id = lead['id']
display_name = lead['display_name']
lead_name = lead['name']
description = lead['description']
url = lead['url']
status_id = lead['status_id']
status_label = lead['status_label']
created_by = lead['created_by']
created_by_name = lead['created_by_name']
updated_by = lead['updated_by']
updated_by_name = lead['updated_by_name']
date_created = lead['date_created']
date_updated = lead['date_updated']
html_url = lead['html_url']
primary_contact_name = None
primary_contact_first_name = None
primary_contact_last_name = None
primary_contact_title = None
primary_contact_primary_phone = None
primary_contact_primary_phone_type = None
primary_contact_other_phones = None
primary_contact_email = None
primary_contact_email_type = None
primary_contact_other_emails = None
primary_contact_primary_url = None
primary_contact_other_urls = None
if primary_contact:
            # primary_contact is known to be truthy here, so index it directly.
            primary_contact_name = primary_contact['name']
            primary_contact_title = primary_contact['title']
if 'name' in primary_contact:
primary_contact_first_name = primary_contact['name'].split(' ')[0]
if len(primary_contact['name'].split(' ')) > 1:
primary_contact_last_name = primary_contact['name'].split(' ')[1]
if primary_contact['phones']:
primary_contact_primary_phone = primary_contact['phones'][0]['phone']
primary_contact_primary_phone_type = primary_contact['phones'][0]['type']
if len(primary_contact['phones']) > 1:
primary_contact_other_phones = ", ".join(o['phone'] for o in primary_contact['phones'][1:])
if primary_contact['emails']:
primary_contact_email = primary_contact['emails'][0]['email']
primary_contact_email_type = primary_contact['emails'][0]['type']
if len(primary_contact['emails']) > 1:
primary_contact_other_emails = ", ".join(o['email'] for o in primary_contact['emails'][1:])
if primary_contact['urls']:
primary_contact_primary_url = primary_contact['urls'][0]['url']
if len(primary_contact['urls']) > 1:
primary_contact_other_urls = ", ".join(o['url'] for o in primary_contact['urls'][1:])
values.append([
id,
display_name,
lead_name,
description,
url,
status_id,
status_label,
primary_contact_name,
primary_contact_first_name,
primary_contact_last_name,
primary_contact_title,
primary_contact_primary_phone,
primary_contact_primary_phone_type,
primary_contact_other_phones,
primary_contact_email,
primary_contact_email_type,
primary_contact_other_emails,
primary_contact_primary_url,
primary_contact_other_urls,
created_by,
created_by_name,
updated_by,
updated_by_name,
date_created,
date_updated,
html_url
])
return values
| 3,493 |
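A usage sketch for get_query_results above, assuming CLOSE_API_KEY and the Close Client are configured as the function expects; it writes the header-plus-rows result to a CSV file (query string and output path are arbitrary examples).

import csv

rows = get_query_results('lead_status:Potential has:emails')
with open('leads.csv', 'w', newline='') as fh:
    csv.writer(fh).writerows(rows)  # first row is the header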
def width_pcc_dmera_2d(n, D, supp):
"""
Optimal width of the circuit for the pcc after compression
Args:
n(int): Number of scales
D(int): Number of cycles per scale
        supp(list): Support sites given as 2D coordinates (converted internally to 1D indices)
Returns:
int: Optimal width
"""
supp_con = [convert_2d_to_1d(c,n) for c in supp]
return optimal_width_freeze(pcc_dmera_2d(n,D,supp_con),supp_con)
| 3,494 |
def global_delete(key):
"""Delete an entity from the global cache.
Args:
key (bytes): The key to delete.
Returns:
tasklets.Future: Eventual result will be ``None``.
"""
batch = _batch.get_batch(_GlobalCacheDeleteBatch)
return batch.add(key)
| 3,495 |
def get_drawing_x(image: Image = None) -> float:
"""
Get the x coordinate value of the current drawing position (x,y).
    Some drawing functions use the current position to draw (see line_to(), line_rel(), move_to(), move_rel()).
    :param image: the image whose drawing position should be retrieved. None means the current target image
        (see set_target() and get_target()).
:return: the x coordinate value of the current drawing position
"""
image = _get_target_image(image)
return image.get_x()
| 3,496 |
from typing import Any, Dict


async def get_by_name(username: str) -> Dict[str, Any]:
    """
    Retrieve one row by its username. The returned object is a dict.
    Raises RecordNotFoundError if the record was not found.
    """
    username = username.lower()
    for user in Database:
        if user["username"] == username:
            return user
    raise RecordNotFoundError(f"Could not find row with username '{username}'")
| 3,497 |
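A small driver for the coroutine above, assuming Database and RecordNotFoundError are defined at module level as the function expects; the looked-up username is made up.

import asyncio

async def main():
    try:
        user = await get_by_name("Alice")
        print(user["username"])
    except RecordNotFoundError as exc:
        print(exc)

asyncio.run(main())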
def semantics(address: str) -> "show_semantics_page":
""" Semantics of address. """
response = requests.get(
f"{request.url_root}api/semantics/{EthConfig.DEFAULT_CHAIN}/{address}",
headers={"x-api-key": current_app.config["API_KEY"]},
)
return show_semantics_page(response)
| 3,498 |
from botocore.exceptions import ClientError


def update_trail(clt, trail_name, log_group_arn, role_arn):
    """
    Update a CloudTrail trail to deliver events to CloudWatch Logs.
    Returns 'fail' on a client error, otherwise None.
    """
    try:
        clt.update_trail(
            Name=trail_name,
            CloudWatchLogsLogGroupArn=log_group_arn,
            CloudWatchLogsRoleArn=role_arn,
        )
    except ClientError as e:
        print(e.response['Error']['Message'])
        return 'fail'
    return
| 3,499 |
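A hedged usage sketch for update_trail above with a boto3 CloudTrail client; the trail name and ARNs are placeholders, and valid AWS credentials are assumed.

import boto3

cloudtrail = boto3.client('cloudtrail')
status = update_trail(
    cloudtrail,
    trail_name='management-events',
    log_group_arn='arn:aws:logs:us-east-1:123456789012:log-group:trail-logs:*',
    role_arn='arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role',
)
if status == 'fail':
    print('Trail update failed')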