body_hash (stringlengths 64–64) | body (stringlengths 23–109k) | docstring (stringlengths 1–57k) | path (stringlengths 4–198) | name (stringlengths 1–115) | repository_name (stringlengths 7–111) | repository_stars (float64 0–191k) | lang (stringclasses 1 value) | body_without_docstring (stringlengths 14–108k) | unified (stringlengths 45–133k)
---|---|---|---|---|---|---|---|---|---|
511b263c38267cb5b5a8f1381182d27f072cc725b8b7dcb05fc3cb82e59fadef
|
@torch.no_grad()
def generate(self, models, sample: Dict[(str, Dict[(str, Tensor)])], **kwargs):
'Generate translations. Match the api of other fairseq generators.\n\n Args:\n models (List[~fairseq.models.FairseqModel]): ensemble of models\n sample (dict): batch\n prefix_tokens (torch.LongTensor, optional): force decoder to begin\n with these tokens\n bos_token (int, optional): beginning of sentence token\n (default: self.eos)\n '
self.model.reset_incremental_state()
if (self.desired_length > (- 1)):
sample['net_input']['tgt_lengths'].fill_((self.desired_length + 1))
return self._generate(sample, **kwargs)
|
Generate translations. Match the api of other fairseq generators.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
bos_token (int, optional): beginning of sentence token
(default: self.eos)
|
fairseq/sequence_generator.py
|
generate
|
takase/alone_seq2seq
| 25 |
python
|
@torch.no_grad()
def generate(self, models, sample: Dict[(str, Dict[(str, Tensor)])], **kwargs):
'Generate translations. Match the api of other fairseq generators.\n\n Args:\n models (List[~fairseq.models.FairseqModel]): ensemble of models\n sample (dict): batch\n prefix_tokens (torch.LongTensor, optional): force decoder to begin\n with these tokens\n bos_token (int, optional): beginning of sentence token\n (default: self.eos)\n '
self.model.reset_incremental_state()
if (self.desired_length > (- 1)):
sample['net_input']['tgt_lengths'].fill_((self.desired_length + 1))
return self._generate(sample, **kwargs)
|
@torch.no_grad()
def generate(self, models, sample: Dict[(str, Dict[(str, Tensor)])], **kwargs):
'Generate translations. Match the api of other fairseq generators.\n\n Args:\n models (List[~fairseq.models.FairseqModel]): ensemble of models\n sample (dict): batch\n prefix_tokens (torch.LongTensor, optional): force decoder to begin\n with these tokens\n bos_token (int, optional): beginning of sentence token\n (default: self.eos)\n '
self.model.reset_incremental_state()
if (self.desired_length > (- 1)):
sample['net_input']['tgt_lengths'].fill_((self.desired_length + 1))
return self._generate(sample, **kwargs)<|docstring|>Generate translations. Match the api of other fairseq generators.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
bos_token (int, optional): beginning of sentence token
(default: self.eos)<|endoftext|>
|
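The generate() body above overrides every target length in the batch when desired_length is set; a minimal standalone sketch of that single step in plain PyTorch (the tensor values are made up for illustration, this is not the fairseq generator itself):

import torch
# Made-up batch of target lengths; generate() overwrites each entry with desired_length + 1.
tgt_lengths = torch.tensor([12, 7, 30])
desired_length = 20
tgt_lengths.fill_(desired_length + 1)
print(tgt_lengths)  # tensor([21, 21, 21])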
59fab48172619be361ddf7056d091bf4808cc344ba3d0da345555824f19b022a
|
def _prefix_tokens(self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int):
'Handle prefix tokens'
prefix_toks = prefix_tokens[:, step].unsqueeze((- 1)).repeat(1, beam_size).view((- 1))
prefix_lprobs = lprobs.gather((- 1), prefix_toks.unsqueeze((- 1)))
prefix_mask = prefix_toks.ne(self.pad)
lprobs[prefix_mask] = torch.tensor((- math.inf)).to(lprobs)
lprobs[prefix_mask] = lprobs[prefix_mask].scatter_((- 1), prefix_toks[prefix_mask].unsqueeze((- 1)), prefix_lprobs[prefix_mask])
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
first_beam = tokens[eos_mask].view((- 1), beam_size, tokens.size((- 1)))[:, 0, 1:(step + 1)]
eos_mask_batch_dim = eos_mask.view((- 1), beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size)
scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size)
lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size)
return (lprobs, tokens, scores)
|
Handle prefix tokens
|
fairseq/sequence_generator.py
|
_prefix_tokens
|
takase/alone_seq2seq
| 25 |
python
|
def _prefix_tokens(self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int):
prefix_toks = prefix_tokens[:, step].unsqueeze((- 1)).repeat(1, beam_size).view((- 1))
prefix_lprobs = lprobs.gather((- 1), prefix_toks.unsqueeze((- 1)))
prefix_mask = prefix_toks.ne(self.pad)
lprobs[prefix_mask] = torch.tensor((- math.inf)).to(lprobs)
lprobs[prefix_mask] = lprobs[prefix_mask].scatter_((- 1), prefix_toks[prefix_mask].unsqueeze((- 1)), prefix_lprobs[prefix_mask])
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
first_beam = tokens[eos_mask].view((- 1), beam_size, tokens.size((- 1)))[:, 0, 1:(step + 1)]
eos_mask_batch_dim = eos_mask.view((- 1), beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size)
scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size)
lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size)
return (lprobs, tokens, scores)
|
def _prefix_tokens(self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int):
prefix_toks = prefix_tokens[:, step].unsqueeze((- 1)).repeat(1, beam_size).view((- 1))
prefix_lprobs = lprobs.gather((- 1), prefix_toks.unsqueeze((- 1)))
prefix_mask = prefix_toks.ne(self.pad)
lprobs[prefix_mask] = torch.tensor((- math.inf)).to(lprobs)
lprobs[prefix_mask] = lprobs[prefix_mask].scatter_((- 1), prefix_toks[prefix_mask].unsqueeze((- 1)), prefix_lprobs[prefix_mask])
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
first_beam = tokens[eos_mask].view((- 1), beam_size, tokens.size((- 1)))[:, 0, 1:(step + 1)]
eos_mask_batch_dim = eos_mask.view((- 1), beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size)
scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size)
lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size)
return (lprobs, tokens, scores)<|docstring|>Handle prefix tokens<|endoftext|>
|
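The masking trick in _prefix_tokens is easier to see on a tiny example: rows whose forced token is not padding have all log-probabilities set to -inf, and only the forced token's original score is scattered back. A minimal sketch in plain PyTorch (two beams over a vocabulary of 3, made-up values; not fairseq code):

import math
import torch
# Two beams over a vocabulary of 3, with made-up probabilities.
lprobs = torch.log(torch.tensor([[0.2, 0.5, 0.3],
                                 [0.6, 0.1, 0.3]]))
prefix_toks = torch.tensor([1, 2])   # forced token ids for this step
pad = 0
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(pad)
lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs)
lprobs[prefix_mask] = lprobs[prefix_mask].scatter_(-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask])
print(lprobs)  # only the forced tokens keep finite scores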
8362761413b42c787780776a6ada581e34cb50047b70753933a5fec138caab45
|
def finalize_hypos(self, step: int, bbsz_idx, eos_scores, tokens, scores, finalized: List[List[Dict[(str, Tensor)]]], finished: List[bool], beam_size: int, attn: Optional[Tensor], src_lengths, max_len: int):
'Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.\n Returns number of sentences being finalized.\n Args:\n bbsz_idx (Tensor):\n '
assert (bbsz_idx.numel() == eos_scores.numel())
tokens_clone = tokens.index_select(0, bbsz_idx)[:, 1:(step + 2)]
tokens_clone[:, step] = self.eos
attn_clone = (attn.index_select(0, bbsz_idx)[:, :, 1:(step + 2)] if (attn is not None) else None)
pos_scores = scores.index_select(0, bbsz_idx)[:, :(step + 1)]
pos_scores[:, step] = eos_scores
pos_scores[:, 1:] = (pos_scores[:, 1:] - pos_scores[:, :(- 1)])
if self.normalize_scores:
eos_scores /= ((step + 1) ** self.len_penalty)
cum_unfin: List[int] = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
sents_seen: Dict[(str, Optional[Tensor])] = {}
for i in range(bbsz_idx.size()[0]):
idx = bbsz_idx[i]
score = eos_scores[i]
unfin_idx = (idx // beam_size)
sent = (unfin_idx + cum_unfin[unfin_idx])
seen = ((str(sent.item()) + '_') + str(unfin_idx.item()))
if (seen not in sents_seen):
sents_seen[seen] = None
if (self.match_source_len and (step > src_lengths[unfin_idx])):
score = torch.tensor((- math.inf)).to(score)
if (len(finalized[sent]) < beam_size):
if (attn_clone is not None):
hypo_attn = attn_clone[i]
else:
hypo_attn = torch.empty(0)
finalized[sent].append({'tokens': tokens_clone[i], 'score': score, 'attention': hypo_attn, 'alignment': torch.empty(0), 'positional_scores': pos_scores[i]})
newly_finished: List[int] = []
for seen in sents_seen.keys():
sent: int = int(float(seen.split('_')[0]))
unfin_idx: int = int(float(seen.split('_')[1]))
if ((not finished[sent]) and self.is_finished(step, unfin_idx, max_len, len(finalized[sent]), beam_size)):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
|
Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.
Returns number of sentences being finalized.
Args:
bbsz_idx (Tensor):
|
fairseq/sequence_generator.py
|
finalize_hypos
|
takase/alone_seq2seq
| 25 |
python
|
def finalize_hypos(self, step: int, bbsz_idx, eos_scores, tokens, scores, finalized: List[List[Dict[(str, Tensor)]]], finished: List[bool], beam_size: int, attn: Optional[Tensor], src_lengths, max_len: int):
'Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.\n Returns number of sentences being finalized.\n Args:\n bbsz_idx (Tensor):\n '
assert (bbsz_idx.numel() == eos_scores.numel())
tokens_clone = tokens.index_select(0, bbsz_idx)[:, 1:(step + 2)]
tokens_clone[:, step] = self.eos
attn_clone = (attn.index_select(0, bbsz_idx)[:, :, 1:(step + 2)] if (attn is not None) else None)
pos_scores = scores.index_select(0, bbsz_idx)[:, :(step + 1)]
pos_scores[:, step] = eos_scores
pos_scores[:, 1:] = (pos_scores[:, 1:] - pos_scores[:, :(- 1)])
if self.normalize_scores:
eos_scores /= ((step + 1) ** self.len_penalty)
cum_unfin: List[int] = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
sents_seen: Dict[(str, Optional[Tensor])] = {}
for i in range(bbsz_idx.size()[0]):
idx = bbsz_idx[i]
score = eos_scores[i]
unfin_idx = (idx // beam_size)
sent = (unfin_idx + cum_unfin[unfin_idx])
seen = ((str(sent.item()) + '_') + str(unfin_idx.item()))
if (seen not in sents_seen):
sents_seen[seen] = None
if (self.match_source_len and (step > src_lengths[unfin_idx])):
score = torch.tensor((- math.inf)).to(score)
if (len(finalized[sent]) < beam_size):
if (attn_clone is not None):
hypo_attn = attn_clone[i]
else:
hypo_attn = torch.empty(0)
finalized[sent].append({'tokens': tokens_clone[i], 'score': score, 'attention': hypo_attn, 'alignment': torch.empty(0), 'positional_scores': pos_scores[i]})
newly_finished: List[int] = []
for seen in sents_seen.keys():
sent: int = int(float(seen.split('_')[0]))
unfin_idx: int = int(float(seen.split('_')[1]))
if ((not finished[sent]) and self.is_finished(step, unfin_idx, max_len, len(finalized[sent]), beam_size)):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
|
def finalize_hypos(self, step: int, bbsz_idx, eos_scores, tokens, scores, finalized: List[List[Dict[(str, Tensor)]]], finished: List[bool], beam_size: int, attn: Optional[Tensor], src_lengths, max_len: int):
'Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.\n Returns number of sentences being finalized.\n Args:\n bbsz_idx (Tensor):\n '
assert (bbsz_idx.numel() == eos_scores.numel())
tokens_clone = tokens.index_select(0, bbsz_idx)[:, 1:(step + 2)]
tokens_clone[:, step] = self.eos
attn_clone = (attn.index_select(0, bbsz_idx)[:, :, 1:(step + 2)] if (attn is not None) else None)
pos_scores = scores.index_select(0, bbsz_idx)[:, :(step + 1)]
pos_scores[:, step] = eos_scores
pos_scores[:, 1:] = (pos_scores[:, 1:] - pos_scores[:, :(- 1)])
if self.normalize_scores:
eos_scores /= ((step + 1) ** self.len_penalty)
cum_unfin: List[int] = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
sents_seen: Dict[(str, Optional[Tensor])] = {}
for i in range(bbsz_idx.size()[0]):
idx = bbsz_idx[i]
score = eos_scores[i]
unfin_idx = (idx // beam_size)
sent = (unfin_idx + cum_unfin[unfin_idx])
seen = ((str(sent.item()) + '_') + str(unfin_idx.item()))
if (seen not in sents_seen):
sents_seen[seen] = None
if (self.match_source_len and (step > src_lengths[unfin_idx])):
score = torch.tensor((- math.inf)).to(score)
if (len(finalized[sent]) < beam_size):
if (attn_clone is not None):
hypo_attn = attn_clone[i]
else:
hypo_attn = torch.empty(0)
finalized[sent].append({'tokens': tokens_clone[i], 'score': score, 'attention': hypo_attn, 'alignment': torch.empty(0), 'positional_scores': pos_scores[i]})
newly_finished: List[int] = []
for seen in sents_seen.keys():
sent: int = int(float(seen.split('_')[0]))
unfin_idx: int = int(float(seen.split('_')[1]))
if ((not finished[sent]) and self.is_finished(step, unfin_idx, max_len, len(finalized[sent]), beam_size)):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished<|docstring|>Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.
Returns number of sentences being finalized.
Args:
bbsz_idx (Tensor):<|endoftext|>
|
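The cum_unfin bookkeeping in finalize_hypos maps an index among the still-unfinished sentences back to its original batch position by adding the number of already-finished sentences that precede it. A small standalone example of that loop (made-up finished flags):

# Made-up finished flags for a batch of 5 sentences.
finished = [True, False, True, False, False]
cum_unfin, prev = [], 0
for f in finished:
    if f:
        prev += 1
    else:
        cum_unfin.append(prev)
print(cum_unfin)  # [1, 2, 2]: unfinished slots 0, 1, 2 map back to sentences 1, 3, 4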
f079ada210ccd10275390f9f8c0d7e0eef234b6700552d6bf9a6310c4f170cce
|
def is_finished(self, step: int, unfin_idx: int, max_len: int, finalized_sent_len: int, beam_size: int):
"\n Check whether we've finished generation for a given sentence, by\n comparing the worst score among finalized hypotheses to the best\n possible score among unfinalized hypotheses.\n "
assert (finalized_sent_len <= beam_size)
if ((finalized_sent_len == beam_size) or (step == max_len)):
return True
return False
|
Check whether we've finished generation for a given sentence, by
comparing the worst score among finalized hypotheses to the best
possible score among unfinalized hypotheses.
|
fairseq/sequence_generator.py
|
is_finished
|
takase/alone_seq2seq
| 25 |
python
|
def is_finished(self, step: int, unfin_idx: int, max_len: int, finalized_sent_len: int, beam_size: int):
"\n Check whether we've finished generation for a given sentence, by\n comparing the worst score among finalized hypotheses to the best\n possible score among unfinalized hypotheses.\n "
assert (finalized_sent_len <= beam_size)
if ((finalized_sent_len == beam_size) or (step == max_len)):
return True
return False
|
def is_finished(self, step: int, unfin_idx: int, max_len: int, finalized_sent_len: int, beam_size: int):
"\n Check whether we've finished generation for a given sentence, by\n comparing the worst score among finalized hypotheses to the best\n possible score among unfinalized hypotheses.\n "
assert (finalized_sent_len <= beam_size)
if ((finalized_sent_len == beam_size) or (step == max_len)):
return True
return False<|docstring|>Check whether we've finished generation for a given sentence, by
comparing the worst score among finalized hypotheses to the best
possible score among unfinalized hypotheses.<|endoftext|>
|
19593fac5eb852c1c381778a4bb31ae8963a8a31bca00a0e126a3c87bc60981a
|
@torch.jit.export
def reorder_encoder_out(self, encoder_outs: Optional[List[EncoderOut]], new_order):
'\n Reorder encoder output according to *new_order*.\n\n Args:\n encoder_out: output from the ``forward()`` method\n new_order (LongTensor): desired order\n\n Returns:\n *encoder_out* rearranged according to *new_order*\n '
new_outs: List[EncoderOut] = []
if (not self.has_encoder()):
return new_outs
for (i, model) in enumerate(self.models):
assert (encoder_outs is not None)
new_outs.append(model.encoder.reorder_encoder_out(encoder_outs[i], new_order))
return new_outs
|
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
|
fairseq/sequence_generator.py
|
reorder_encoder_out
|
takase/alone_seq2seq
| 25 |
python
|
@torch.jit.export
def reorder_encoder_out(self, encoder_outs: Optional[List[EncoderOut]], new_order):
'\n Reorder encoder output according to *new_order*.\n\n Args:\n encoder_out: output from the ``forward()`` method\n new_order (LongTensor): desired order\n\n Returns:\n *encoder_out* rearranged according to *new_order*\n '
new_outs: List[EncoderOut] = []
if (not self.has_encoder()):
return new_outs
for (i, model) in enumerate(self.models):
assert (encoder_outs is not None)
new_outs.append(model.encoder.reorder_encoder_out(encoder_outs[i], new_order))
return new_outs
|
@torch.jit.export
def reorder_encoder_out(self, encoder_outs: Optional[List[EncoderOut]], new_order):
'\n Reorder encoder output according to *new_order*.\n\n Args:\n encoder_out: output from the ``forward()`` method\n new_order (LongTensor): desired order\n\n Returns:\n *encoder_out* rearranged according to *new_order*\n '
new_outs: List[EncoderOut] = []
if (not self.has_encoder()):
return new_outs
for (i, model) in enumerate(self.models):
assert (encoder_outs is not None)
new_outs.append(model.encoder.reorder_encoder_out(encoder_outs[i], new_order))
return new_outs<|docstring|>Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*<|endoftext|>
|
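What reordering by new_order means in practice: beam search repeats and permutes batch entries, and every cached encoder tensor must follow the same index order along the batch dimension. A generic sketch with plain tensors (made-up values, not the fairseq EncoderOut structure):

import torch
# Three "source sentences" with a feature dimension of 2 (made-up values).
encoder_states = torch.arange(6).view(3, 2)
# Beam size 2: each sentence is repeated, and the order may change between steps.
new_order = torch.tensor([0, 0, 2, 2, 1, 1])
print(encoder_states.index_select(0, new_order))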
0382e91fdf459e3d850a5ef1d7dbd6829769114e1c82a8ccfbdfa30af1bccd39
|
def __init__(self, models, tgt_dict, left_pad_target=False, **kwargs):
'Generates translations of a given source sentence.\n\n Produces alignments following "Jointly Learning to Align and\n Translate with Transformer Models" (Garg et al., EMNLP 2019).\n\n Args:\n left_pad_target (bool, optional): Whether or not the\n hypothesis should be left padded or not when they are\n teacher forced for generating alignments.\n '
super().__init__(EnsembleModelWithAlignment(models), tgt_dict, **kwargs)
self.left_pad_target = left_pad_target
|
Generates translations of a given source sentence.
Produces alignments following "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
left_pad_target (bool, optional): Whether or not the
hypothesis should be left padded or not when they are
teacher forced for generating alignments.
|
fairseq/sequence_generator.py
|
__init__
|
takase/alone_seq2seq
| 25 |
python
|
def __init__(self, models, tgt_dict, left_pad_target=False, **kwargs):
'Generates translations of a given source sentence.\n\n Produces alignments following "Jointly Learning to Align and\n Translate with Transformer Models" (Garg et al., EMNLP 2019).\n\n Args:\n left_pad_target (bool, optional): Whether or not the\n hypothesis should be left padded or not when they are\n teacher forced for generating alignments.\n '
super().__init__(EnsembleModelWithAlignment(models), tgt_dict, **kwargs)
self.left_pad_target = left_pad_target
|
def __init__(self, models, tgt_dict, left_pad_target=False, **kwargs):
'Generates translations of a given source sentence.\n\n Produces alignments following "Jointly Learning to Align and\n Translate with Transformer Models" (Garg et al., EMNLP 2019).\n\n Args:\n left_pad_target (bool, optional): Whether or not the\n hypothesis should be left padded or not when they are\n teacher forced for generating alignments.\n '
super().__init__(EnsembleModelWithAlignment(models), tgt_dict, **kwargs)
self.left_pad_target = left_pad_target<|docstring|>Generates translations of a given source sentence.
Produces alignments following "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
left_pad_target (bool, optional): Whether or not the
hypothesis should be left padded or not when they are
teacher forced for generating alignments.<|endoftext|>
|
ba8627d0d3d1cb6aeb5a434d32ff2b256c6eaa99e11ae7efda764d2f20ace147
|
def conv3x3(in_planes, out_planes, stride=1):
'3x3 convolution with padding'
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
|
3x3 convolution with padding
|
lib/models/pose_resnet.py
|
conv3x3
|
humen-team/HRNet-for-Fashion-Landmark-Estimation.PyTorch
| 76 |
python
|
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
|
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)<|docstring|>3x3 convolution with padding<|endoftext|>
|
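A quick usage sketch of conv3x3 (the definition is repeated so the snippet runs standalone): with padding=1, a stride-1 3x3 convolution preserves the spatial size, while stride=2 halves it.

import torch
import torch.nn as nn

def conv3x3(in_planes, out_planes, stride=1):
    '3x3 convolution with padding'
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)

x = torch.randn(1, 64, 56, 56)                   # made-up feature map
print(conv3x3(64, 128)(x).shape)                 # torch.Size([1, 128, 56, 56])
print(conv3x3(64, 128, stride=2)(x).shape)       # torch.Size([1, 128, 28, 28])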
71d272a121ecf1c4894d6f87f923d5546659a63e57289f7aef7e6fe29e6ca6ac
|
def handle_nan(self, axis: int=1, how: str='any', mode: str='delete'):
"Handle the missing values.\n\n Arguments:\n axis: Determines if patients (0) or variables (1) with the missing values have to be fixed.\n how: Determines if handling is needed when there is at least one missing value ('any') or all of them are missing ('all').\n mode: Determines the strategy: 'delete' will delete the variable/patient, 'fill' will fill a missing value with the imputation method.\n "
if (mode == 'delete'):
self._feature_dataframe.dropna(axis=axis, how=how, inplace=True)
self._feature_outcome_dataframe.dropna(axis=axis, how=how, inplace=True)
self._feature_column = list(self._feature_dataframe.columns)
self._patient_name = list(self._feature_outcome_dataframe.index)
if (self._outcome_column in self._feature_column):
self._feature_column.remove(self._outcome_column)
self._outcome = self._feature_outcome_dataframe[self._outcome_column]
self._class_label = pd.unique(np.array(list(self._outcome)))
self._class_label.sort()
data_balance = []
for l in self._class_label:
data_balance.append((np.sum((np.array(list(self._outcome)) == l)) / len(self._outcome)))
print('Number of observations: {}\nClass labels: {}\nClasses balance: {}'.format(len(self._outcome), self._class_label, data_balance))
if (mode == 'fill'):
print('Not implemented yet')
return None
|
Handle the missing values.
Arguments:
axis: Determines if patients (0) or variables (1) with the missing values have to be fixed.
how: Determines if handling is needed when there is at least one missing value ('any') or all of them are missing ('all').
mode: Determines the strategy: 'delete' will delete the variable/patient, 'fill' will fill a missing value with the imputation method.
|
pmtool/AnalysisBox.py
|
handle_nan
|
primakov/Precision-medicine-toolbox
| 23 |
python
|
def handle_nan(self, axis: int=1, how: str='any', mode: str='delete'):
"Handle the missing values.\n\n Arguments:\n axis: Determines if patients (0) or variables (1) with the missing values have to be fixed.\n how: Determines if handling is needed when there is at least one missing value ('any') or all of them are missing ('all').\n mode: Determines the strategy: 'delete' will delete the variable/patient, 'fill' will fill a missing value with the imputation method.\n "
if (mode == 'delete'):
self._feature_dataframe.dropna(axis=axis, how=how, inplace=True)
self._feature_outcome_dataframe.dropna(axis=axis, how=how, inplace=True)
self._feature_column = list(self._feature_dataframe.columns)
self._patient_name = list(self._feature_outcome_dataframe.index)
if (self._outcome_column in self._feature_column):
self._feature_column.remove(self._outcome_column)
self._outcome = self._feature_outcome_dataframe[self._outcome_column]
self._class_label = pd.unique(np.array(list(self._outcome)))
self._class_label.sort()
data_balance = []
for l in self._class_label:
data_balance.append((np.sum((np.array(list(self._outcome)) == l)) / len(self._outcome)))
print('Number of observations: {}\nClass labels: {}\nClasses balance: {}'.format(len(self._outcome), self._class_label, data_balance))
if (mode == 'fill'):
print('Not implemented yet')
return None
|
def handle_nan(self, axis: int=1, how: str='any', mode: str='delete'):
"Handle the missing values.\n\n Arguments:\n axis: Determines if patients (0) or variables (1) with the missing values have to be fixed.\n how: Determines if handling is needed when there is at least one missing value ('any') or all of them are missing ('all').\n mode: Determines the strategy: 'delete' will delete the variable/patient, 'fill' will fill a missing value with the imputation method.\n "
if (mode == 'delete'):
self._feature_dataframe.dropna(axis=axis, how=how, inplace=True)
self._feature_outcome_dataframe.dropna(axis=axis, how=how, inplace=True)
self._feature_column = list(self._feature_dataframe.columns)
self._patient_name = list(self._feature_outcome_dataframe.index)
if (self._outcome_column in self._feature_column):
self._feature_column.remove(self._outcome_column)
self._outcome = self._feature_outcome_dataframe[self._outcome_column]
self._class_label = pd.unique(np.array(list(self._outcome)))
self._class_label.sort()
data_balance = []
for l in self._class_label:
data_balance.append((np.sum((np.array(list(self._outcome)) == l)) / len(self._outcome)))
print('Number of observations: {}\nClass labels: {}\nClasses balance: {}'.format(len(self._outcome), self._class_label, data_balance))
if (mode == 'fill'):
print('Not implemented yet')
return None<|docstring|>Handle the missing values.
Arguments:
axis: Determines if patients (0) or variables (1) with the missing values have to be fixed.
how: Determines if handling is needed when there is at least one missing value ('any') or all of them are missing ('all').
mode: Determines the strategy: 'delete' will delete the variable/patient, 'fill' will fill a missing value with the imputation method.<|endoftext|>
|
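handle_nan in 'delete' mode relies on pandas dropna; a plain-pandas illustration of the axis/how semantics it documents (this is not the AnalysisBox API, and the example frame is made up):

import numpy as np
import pandas as pd
# Made-up feature frame with one missing value.
df = pd.DataFrame({'f1': [1.0, 2.0, np.nan], 'f2': [0.5, 0.7, 0.9]})
print(df.dropna(axis=1, how='any').columns.tolist())  # ['f2']  (drops the variable with a NaN)
print(df.dropna(axis=0, how='any').shape)             # (2, 2)  (drops the patient with a NaN)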
af6e8e2b57f636fdc32285b18c0fd365bd2239ded6ec3a476fad8fe7c6790496
|
def handle_constant(self):
'Drop the features with the constant values.'
constant_features = self._feature_dataframe.columns[(self._feature_dataframe.nunique() <= 1)]
self._feature_dataframe.drop(constant_features, axis=1, inplace=True)
self._feature_outcome_dataframe.drop(constant_features, axis=1, inplace=True)
self._feature_column = list(self._feature_dataframe.columns)
if (self._outcome_column in self._feature_column):
self._feature_column.remove(self._outcome_column)
self._outcome = self._feature_outcome_dataframe[self._outcome_column]
self._class_label = pd.unique(np.array(list(self._outcome)))
self._class_label.sort()
data_balance = []
for l in self._class_label:
data_balance.append((np.sum((np.array(list(self._outcome)) == l)) / len(self._outcome)))
print('Number of observations: {}\nClass labels: {}\nClasses balance: {}'.format(len(self._outcome), self._class_label, data_balance))
return None
|
Drop the features with the constant values.
|
pmtool/AnalysisBox.py
|
handle_constant
|
primakov/Precision-medicine-toolbox
| 23 |
python
|
def handle_constant(self):
constant_features = self._feature_dataframe.columns[(self._feature_dataframe.nunique() <= 1)]
self._feature_dataframe.drop(constant_features, axis=1, inplace=True)
self._feature_outcome_dataframe.drop(constant_features, axis=1, inplace=True)
self._feature_column = list(self._feature_dataframe.columns)
if (self._outcome_column in self._feature_column):
self._feature_column.remove(self._outcome_column)
self._outcome = self._feature_outcome_dataframe[self._outcome_column]
self._class_label = pd.unique(np.array(list(self._outcome)))
self._class_label.sort()
data_balance = []
for l in self._class_label:
data_balance.append((np.sum((np.array(list(self._outcome)) == l)) / len(self._outcome)))
print('Number of observations: {}\nClass labels: {}\nClasses balance: {}'.format(len(self._outcome), self._class_label, data_balance))
return None
|
def handle_constant(self):
constant_features = self._feature_dataframe.columns[(self._feature_dataframe.nunique() <= 1)]
self._feature_dataframe.drop(constant_features, axis=1, inplace=True)
self._feature_outcome_dataframe.drop(constant_features, axis=1, inplace=True)
self._feature_column = list(self._feature_dataframe.columns)
if (self._outcome_column in self._feature_column):
self._feature_column.remove(self._outcome_column)
self._outcome = self._feature_outcome_dataframe[self._outcome_column]
self._class_label = pd.unique(np.array(list(self._outcome)))
self._class_label.sort()
data_balance = []
for l in self._class_label:
data_balance.append((np.sum((np.array(list(self._outcome)) == l)) / len(self._outcome)))
print('Number of observations: {}\nClass labels: {}\nClasses balance: {}'.format(len(self._outcome), self._class_label, data_balance))
return None<|docstring|>Drop the features with the constant values.<|endoftext|>
|
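The constant-feature filter in handle_constant is plain pandas: columns with at most one unique value are selected and dropped. A standalone illustration with a made-up frame:

import pandas as pd
# Made-up frame with one constant column.
df = pd.DataFrame({'const': [1, 1, 1], 'varying': [1, 2, 3]})
constant_features = df.columns[df.nunique() <= 1]
print(list(constant_features))                               # ['const']
print(df.drop(constant_features, axis=1).columns.tolist())   # ['varying']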
7cc32176163a236392b25f31c21b671961a3e1dbd4ff005eb1344882bd3d5dd6
|
def plot_distribution(self, features_to_plot: list=[], binary_classes_to_plot: list=[]):
'Plot distribution of the feature values in classes into interactive .html report.\n\n Arguments:\n features_to_plot: List of specific features to be selected (otherwise selects all the numerical features).\n binary_classes_to_plot: List, containing 2 classes of interest, if the dataset is multi-class.\n '
if (len(self._outcome) > 0):
if (len(binary_classes_to_plot) == 2):
if ((binary_classes_to_plot[0] in self._class_label) & (binary_classes_to_plot[1] in self._class_label)):
if (not features_to_plot):
features_to_plot = self._feature_column
num_features = []
for feature in features_to_plot:
if (self._feature_dataframe[feature].dtype != 'object'):
num_features.append(feature)
cols = 4
rows = ((len(num_features) // 4) + 1)
num_features_tuple = tuple(num_features)
fig = plotly.subplots.make_subplots(rows=rows, cols=cols, subplot_titles=num_features_tuple)
counter = 0
for feature in num_features:
c = ((counter % 4) + 1)
r = ((counter // 4) + 1)
fig.append_trace(go.Histogram(x=self._feature_outcome_dataframe.loc[(self._feature_outcome_dataframe[self._outcome_column] == binary_classes_to_plot[0])][feature], opacity=0.75, name=str(binary_classes_to_plot[0]), marker={'color': 'magenta'}, showlegend=(counter == 0)), r, c)
fig.append_trace(go.Histogram(x=self._feature_outcome_dataframe.loc[(self._feature_outcome_dataframe[self._outcome_column] == binary_classes_to_plot[1])][feature], opacity=0.75, name=str(binary_classes_to_plot[1]), marker={'color': 'orange'}, showlegend=(counter == 0)), r, c)
fig.update_xaxes(title_text='values', title_font={'size': 10}, title_standoff=5, row=r, col=c, showgrid=False, zeroline=False)
fig.update_yaxes(title_text='count', title_font={'size': 10}, title_standoff=0, row=r, col=c, showgrid=False, zeroline=False)
fig.layout.update(go.Layout(barmode='overlay'))
counter += 1
for i in fig['layout']['annotations']:
i['font'] = dict(size=10)
fig.update_layout(title_text='Features binary distribution in classes', height=(rows * 250), width=1250)
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_distr.html'), config={'scrollZoom': True})
else:
print('Wrong class label(s).')
else:
color_scheme = ['magenta', 'orange', 'cyan', 'yellow', 'lime', 'blue', 'red', 'green', 'darkviolet', 'saddlebrown']
if (not features_to_plot):
features_to_plot = self._feature_column
num_features = []
for feature in features_to_plot:
if (self._feature_dataframe[feature].dtype != 'object'):
num_features.append(feature)
cols = 4
rows = ((len(num_features) // 4) + 1)
num_features_tuple = tuple(num_features)
fig = plotly.subplots.make_subplots(rows=rows, cols=cols, subplot_titles=num_features_tuple)
counter = 0
for feature in num_features:
c = ((counter % 4) + 1)
r = ((counter // 4) + 1)
counter_colors = 0
for cl in self._class_label:
fig.append_trace(go.Histogram(x=self._feature_outcome_dataframe.loc[(self._feature_outcome_dataframe[self._outcome_column] == cl)][feature], opacity=0.5, name=str(cl), marker={'color': color_scheme[counter_colors]}, showlegend=(counter == 0)), r, c)
counter_colors += 1
fig.update_xaxes(title_text='values', title_font={'size': 10}, title_standoff=5, row=r, col=c, showgrid=False, zeroline=False)
fig.update_yaxes(title_text='count', title_font={'size': 10}, title_standoff=0, row=r, col=c, showgrid=False, zeroline=False)
fig.layout.update(go.Layout(barmode='overlay'))
counter += 1
for i in fig['layout']['annotations']:
i['font'] = dict(size=10)
fig.update_layout(title_text='Features binary distribution in classes', height=(rows * 250), width=1250)
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_distr.html'), config={'scrollZoom': True})
else:
print('Outcome column should be presented')
return None
|
Plot distribution of the feature values in classes into interactive .html report.
Arguments:
features_to_plot: List of specific features to be selected (otherwise selects all the numerical features).
binary_classes_to_plot: List, containing 2 classes of interest, if the dataset is multi-class.
|
pmtool/AnalysisBox.py
|
plot_distribution
|
primakov/Precision-medicine-toolbox
| 23 |
python
|
def plot_distribution(self, features_to_plot: list=[], binary_classes_to_plot: list=[]):
'Plot distribution of the feature values in classes into interactive .html report.\n\n Arguments:\n features_to_plot: List of specific features to be selected (otherwise selects all the numerical features).\n binary_classes_to_plot: List, containing 2 classes of interest, if the dataset is multi-class.\n '
if (len(self._outcome) > 0):
if (len(binary_classes_to_plot) == 2):
if ((binary_classes_to_plot[0] in self._class_label) & (binary_classes_to_plot[1] in self._class_label)):
if (not features_to_plot):
features_to_plot = self._feature_column
num_features = []
for feature in features_to_plot:
if (self._feature_dataframe[feature].dtype != 'object'):
num_features.append(feature)
cols = 4
rows = ((len(num_features) // 4) + 1)
num_features_tuple = tuple(num_features)
fig = plotly.subplots.make_subplots(rows=rows, cols=cols, subplot_titles=num_features_tuple)
counter = 0
for feature in num_features:
c = ((counter % 4) + 1)
r = ((counter // 4) + 1)
fig.append_trace(go.Histogram(x=self._feature_outcome_dataframe.loc[(self._feature_outcome_dataframe[self._outcome_column] == binary_classes_to_plot[0])][feature], opacity=0.75, name=str(binary_classes_to_plot[0]), marker={'color': 'magenta'}, showlegend=(counter == 0)), r, c)
fig.append_trace(go.Histogram(x=self._feature_outcome_dataframe.loc[(self._feature_outcome_dataframe[self._outcome_column] == binary_classes_to_plot[1])][feature], opacity=0.75, name=str(binary_classes_to_plot[1]), marker={'color': 'orange'}, showlegend=(counter == 0)), r, c)
fig.update_xaxes(title_text='values', title_font={'size': 10}, title_standoff=5, row=r, col=c, showgrid=False, zeroline=False)
fig.update_yaxes(title_text='count', title_font={'size': 10}, title_standoff=0, row=r, col=c, showgrid=False, zeroline=False)
fig.layout.update(go.Layout(barmode='overlay'))
counter += 1
for i in fig['layout']['annotations']:
i['font'] = dict(size=10)
fig.update_layout(title_text='Features binary distribution in classes', height=(rows * 250), width=1250)
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_distr.html'), config={'scrollZoom': True})
else:
print('Wrong class label(s).')
else:
color_scheme = ['magenta', 'orange', 'cyan', 'yellow', 'lime', 'blue', 'red', 'green', 'darkviolet', 'saddlebrown']
if (not features_to_plot):
features_to_plot = self._feature_column
num_features = []
for feature in features_to_plot:
if (self._feature_dataframe[feature].dtype != 'object'):
num_features.append(feature)
cols = 4
rows = ((len(num_features) // 4) + 1)
num_features_tuple = tuple(num_features)
fig = plotly.subplots.make_subplots(rows=rows, cols=cols, subplot_titles=num_features_tuple)
counter = 0
for feature in num_features:
c = ((counter % 4) + 1)
r = ((counter // 4) + 1)
counter_colors = 0
for cl in self._class_label:
fig.append_trace(go.Histogram(x=self._feature_outcome_dataframe.loc[(self._feature_outcome_dataframe[self._outcome_column] == cl)][feature], opacity=0.5, name=str(cl), marker={'color': color_scheme[counter_colors]}, showlegend=(counter == 0)), r, c)
counter_colors += 1
fig.update_xaxes(title_text='values', title_font={'size': 10}, title_standoff=5, row=r, col=c, showgrid=False, zeroline=False)
fig.update_yaxes(title_text='count', title_font={'size': 10}, title_standoff=0, row=r, col=c, showgrid=False, zeroline=False)
fig.layout.update(go.Layout(barmode='overlay'))
counter += 1
for i in fig['layout']['annotations']:
i['font'] = dict(size=10)
fig.update_layout(title_text='Features binary distribution in classes', height=(rows * 250), width=1250)
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_distr.html'), config={'scrollZoom': True})
else:
print('Outcome column should be presented')
return None
|
def plot_distribution(self, features_to_plot: list=[], binary_classes_to_plot: list=[]):
'Plot distribution of the feature values in classes into interactive .html report.\n\n Arguments:\n features_to_plot: List of specific features to be selected (otherwise selects all the numerical features).\n binary_classes_to_plot: List, containing 2 classes of interest, if the dataset is multi-class.\n '
if (len(self._outcome) > 0):
if (len(binary_classes_to_plot) == 2):
if ((binary_classes_to_plot[0] in self._class_label) & (binary_classes_to_plot[1] in self._class_label)):
if (not features_to_plot):
features_to_plot = self._feature_column
num_features = []
for feature in features_to_plot:
if (self._feature_dataframe[feature].dtype != 'object'):
num_features.append(feature)
cols = 4
rows = ((len(num_features) // 4) + 1)
num_features_tuple = tuple(num_features)
fig = plotly.subplots.make_subplots(rows=rows, cols=cols, subplot_titles=num_features_tuple)
counter = 0
for feature in num_features:
c = ((counter % 4) + 1)
r = ((counter // 4) + 1)
fig.append_trace(go.Histogram(x=self._feature_outcome_dataframe.loc[(self._feature_outcome_dataframe[self._outcome_column] == binary_classes_to_plot[0])][feature], opacity=0.75, name=str(binary_classes_to_plot[0]), marker={'color': 'magenta'}, showlegend=(counter == 0)), r, c)
fig.append_trace(go.Histogram(x=self._feature_outcome_dataframe.loc[(self._feature_outcome_dataframe[self._outcome_column] == binary_classes_to_plot[1])][feature], opacity=0.75, name=str(binary_classes_to_plot[1]), marker={'color': 'orange'}, showlegend=(counter == 0)), r, c)
fig.update_xaxes(title_text='values', title_font={'size': 10}, title_standoff=5, row=r, col=c, showgrid=False, zeroline=False)
fig.update_yaxes(title_text='count', title_font={'size': 10}, title_standoff=0, row=r, col=c, showgrid=False, zeroline=False)
fig.layout.update(go.Layout(barmode='overlay'))
counter += 1
for i in fig['layout']['annotations']:
i['font'] = dict(size=10)
fig.update_layout(title_text='Features binary distribution in classes', height=(rows * 250), width=1250)
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_distr.html'), config={'scrollZoom': True})
else:
print('Wrong class label(s).')
else:
color_scheme = ['magenta', 'orange', 'cyan', 'yellow', 'lime', 'blue', 'red', 'green', 'darkviolet', 'saddlebrown']
if (not features_to_plot):
features_to_plot = self._feature_column
num_features = []
for feature in features_to_plot:
if (self._feature_dataframe[feature].dtype != 'object'):
num_features.append(feature)
cols = 4
rows = ((len(num_features) // 4) + 1)
num_features_tuple = tuple(num_features)
fig = plotly.subplots.make_subplots(rows=rows, cols=cols, subplot_titles=num_features_tuple)
counter = 0
for feature in num_features:
c = ((counter % 4) + 1)
r = ((counter // 4) + 1)
counter_colors = 0
for cl in self._class_label:
fig.append_trace(go.Histogram(x=self._feature_outcome_dataframe.loc[(self._feature_outcome_dataframe[self._outcome_column] == cl)][feature], opacity=0.5, name=str(cl), marker={'color': color_scheme[counter_colors]}, showlegend=(counter == 0)), r, c)
counter_colors += 1
fig.update_xaxes(title_text='values', title_font={'size': 10}, title_standoff=5, row=r, col=c, showgrid=False, zeroline=False)
fig.update_yaxes(title_text='count', title_font={'size': 10}, title_standoff=0, row=r, col=c, showgrid=False, zeroline=False)
fig.layout.update(go.Layout(barmode='overlay'))
counter += 1
for i in fig['layout']['annotations']:
i['font'] = dict(size=10)
fig.update_layout(title_text='Features binary distribution in classes', height=(rows * 250), width=1250)
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_distr.html'), config={'scrollZoom': True})
else:
print('Outcome column should be presented')
return None<|docstring|>Plot distribution of the feature values in classes into interactive .html report.
Arguments:
features_to_plot: List of specific features to be selected (otherwise selects all the numerical features).
binary_classes_to_plot: List, containing 2 classes of interest, if the dataset is multi-class.<|endoftext|>
|
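The subplot placement used in plot_distribution (and again in plot_univariate_roc further down) derives plotly's 1-based row/column indices from a running counter over a 4-column grid; a tiny sketch of that arithmetic:

# plotly subplots are addressed with 1-based (row, col) indices.
for counter in range(6):
    c = (counter % 4) + 1
    r = (counter // 4) + 1
    print(counter, '-> row', r, 'col', c)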
dd12cb6269b83eb8d051f3655f102da3eec8ff6c23b4a65b7a8d00c8418240a4
|
def plot_correlation_matrix(self, features_to_plot: list=[]):
"Plot correlation (Spearman's) matrix for the features into interactive .html report.\n\n Arguments:\n features_to_plot: List of specific features to be selected (otherwise selects all the numerical features).\n "
pio.renderers.default = 'iframe'
if (not features_to_plot):
features_to_plot = self._feature_column
num_features = []
for feature in features_to_plot:
if (self._feature_dataframe[feature].dtype != 'object'):
num_features.append(feature)
data = go.Heatmap(z=np.abs(np.array(self._feature_dataframe[num_features].corr(method='spearman'))), x=num_features, y=num_features, colorbar=dict(title='Spearman corr'), hovertemplate='feature_1: %{x}<br>feature_2: %{y}<br>r_Spearman: %{z}<extra></extra>')
layout = {'title': 'Features correlation matrix'}
fig = go.Figure(data=data, layout=layout)
fig.update_xaxes(tickfont=dict(size=7))
fig.update_yaxes(tickfont=dict(size=7))
fig.update_layout(height=((len(num_features) * 20) + 250), width=((len(num_features) * 20) + 250))
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_corr.html'), config={'scrollZoom': True})
return None
|
Plot correlation (Spearman's) matrix for the features into interactive .html report.
Arguments:
features_to_plot: List of specific features to be selected (otherwise selects all the numerical features).
|
pmtool/AnalysisBox.py
|
plot_correlation_matrix
|
primakov/Precision-medicine-toolbox
| 23 |
python
|
def plot_correlation_matrix(self, features_to_plot: list=[]):
"Plot correlation (Spearman's) matrix for the features into interactive .html report.\n\n Arguments:\n features_to_plot: List of specific features to be selected (otherwise selects all the numerical features).\n "
pio.renderers.default = 'iframe'
if (not features_to_plot):
features_to_plot = self._feature_column
num_features = []
for feature in features_to_plot:
if (self._feature_dataframe[feature].dtype != 'object'):
num_features.append(feature)
data = go.Heatmap(z=np.abs(np.array(self._feature_dataframe[num_features].corr(method='spearman'))), x=num_features, y=num_features, colorbar=dict(title='Spearman corr'), hovertemplate='feature_1: %{x}<br>feature_2: %{y}<br>r_Spearman: %{z}<extra></extra>')
layout = {'title': 'Features correlation matrix'}
fig = go.Figure(data=data, layout=layout)
fig.update_xaxes(tickfont=dict(size=7))
fig.update_yaxes(tickfont=dict(size=7))
fig.update_layout(height=((len(num_features) * 20) + 250), width=((len(num_features) * 20) + 250))
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_corr.html'), config={'scrollZoom': True})
return None
|
def plot_correlation_matrix(self, features_to_plot: list=[]):
"Plot correlation (Spearman's) matrix for the features into interactive .html report.\n\n Arguments:\n features_to_plot: List of specific features to be selected (otherwise selects all the numerical features).\n "
pio.renderers.default = 'iframe'
if (not features_to_plot):
features_to_plot = self._feature_column
num_features = []
for feature in features_to_plot:
if (self._feature_dataframe[feature].dtype != 'object'):
num_features.append(feature)
data = go.Heatmap(z=np.abs(np.array(self._feature_dataframe[num_features].corr(method='spearman'))), x=num_features, y=num_features, colorbar=dict(title='Spearman corr'), hovertemplate='feature_1: %{x}<br>feature_2: %{y}<br>r_Spearman: %{z}<extra></extra>')
layout = {'title': 'Features correlation matrix'}
fig = go.Figure(data=data, layout=layout)
fig.update_xaxes(tickfont=dict(size=7))
fig.update_yaxes(tickfont=dict(size=7))
fig.update_layout(height=((len(num_features) * 20) + 250), width=((len(num_features) * 20) + 250))
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_corr.html'), config={'scrollZoom': True})
return None<|docstring|>Plot correlation (Spearman's) matrix for the features into interactive .html report.
Arguments:
features_to_plot: List of specific features to be selected (otherwise selects all the numerical features).<|endoftext|>
|
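The quantity plotted by plot_correlation_matrix is the absolute Spearman correlation matrix of the numeric feature columns; a pandas-only sketch without the plotly heatmap (made-up data):

import pandas as pd
# Made-up feature table; the heatmap above shows the absolute value of this matrix.
df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 1, 4, 3], 'c': [4, 3, 2, 1]})
print(df.corr(method='spearman').abs())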
c4817615ba5b122e27c39dfd2e32ce78be5a7f7022932741b831107fc7ee3828
|
def plot_MW_p(self, features_to_plot: list=[], binary_classes_to_plot: list=[], p_threshold: float=0.05):
'Plot two-sided Mann-Whitney U test p-values for comparison of features values means in 2 classes (with correction for multiple testing) into interactive .html report.\n\n Arguments:\n features_to_plot: List of specific features to be selected (otherwise selects all the numerical features).\n binary_classes_to_plot: List, containing 2 classes of interest, if the dataset is multi-class.\n p_threshold: Significance level.\n '
if (len(self._outcome) > 0):
if (len(self._class_label) == 2):
binary_classes_to_plot = self._class_label
elif (len(binary_classes_to_plot) != 2):
print('Only binary class labels are supported.')
return
elif (not ((binary_classes_to_plot[0] in self._class_label) & (binary_classes_to_plot[1] in self._class_label))):
print('Wrong class label(s).')
return
if (not features_to_plot):
features_to_plot = self._feature_column
num_features = []
for feature in features_to_plot:
if (self._feature_dataframe[feature].dtype != 'object'):
num_features.append(feature)
p_MW_corr = self.__get_MW_p(ftrs=num_features, binary_classes_to_plot=binary_classes_to_plot, p_threshold=p_threshold)
colors = [self.__get_color(v, p_threshold) for v in p_MW_corr]
shapes = [{'type': 'line', 'xref': 'x', 'yref': 'y', 'x0': p_threshold, 'y0': 0, 'x1': p_threshold, 'y1': len(num_features)}]
annotations = [go.layout.Annotation(x=math.log10(p_threshold), y=len(num_features), text=('alpha=' + str(p_threshold)), showarrow=True, arrowhead=7, ax=100, ay=0)]
layout = plotly.graph_objs.Layout(shapes=shapes)
fig = go.Figure([go.Bar(x=list(p_MW_corr), y=num_features, marker={'color': colors}, orientation='h')], layout=layout)
fig.update_yaxes(tickfont=dict(size=7))
fig.update_xaxes(tickfont=dict(size=7))
fig.update_layout(title_text='The p-values for Mann-Whitney test (Bonferroni corrected)', height=((len(num_features) * 20) + 250), width=750, xaxis_type='log', annotations=annotations, xaxis={'mirror': 'allticks', 'side': 'top', 'dtick': 1, 'showgrid': True})
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_MW.html'), config={'scrollZoom': True})
else:
print('Outcome column should be presented')
return None
|
Plot two-sided Mann-Whitney U test p-values for comparison of features values means in 2 classes (with correction for multiple testing) into interactive .html report.
Arguments:
features_to_plot: List of specific features to be selected (otherwise selects all the numerical features).
binary_classes_to_plot: List, containing 2 classes of interest, if the dataset is multi-class.
p_threshold: Significance level.
|
pmtool/AnalysisBox.py
|
plot_MW_p
|
primakov/Precision-medicine-toolbox
| 23 |
python
|
def plot_MW_p(self, features_to_plot: list=[], binary_classes_to_plot: list=[], p_threshold: float=0.05):
'Plot two-sided Mann-Whitney U test p-values for comparison of features values means in 2 classes (with correction for multiple testing) into interactive .html report.\n\n Arguments:\n features_to_plot: List of specific features to be selected (otherwise selects all the numerical features).\n binary_classes_to_plot: List, containing 2 classes of interest, if the dataset is multi-class.\n p_threshold: Significance level.\n '
if (len(self._outcome) > 0):
if (len(self._class_label) == 2):
binary_classes_to_plot = self._class_label
elif (len(binary_classes_to_plot) != 2):
print('Only binary class labels are supported.')
return
elif (not ((binary_classes_to_plot[0] in self._class_label) & (binary_classes_to_plot[1] in self._class_label))):
print('Wrong class label(s).')
return
if (not features_to_plot):
features_to_plot = self._feature_column
num_features = []
for feature in features_to_plot:
if (self._feature_dataframe[feature].dtype != 'object'):
num_features.append(feature)
p_MW_corr = self.__get_MW_p(ftrs=num_features, binary_classes_to_plot=binary_classes_to_plot, p_threshold=p_threshold)
colors = [self.__get_color(v, p_threshold) for v in p_MW_corr]
shapes = [{'type': 'line', 'xref': 'x', 'yref': 'y', 'x0': p_threshold, 'y0': 0, 'x1': p_threshold, 'y1': len(num_features)}]
annotations = [go.layout.Annotation(x=math.log10(p_threshold), y=len(num_features), text=('alpha=' + str(p_threshold)), showarrow=True, arrowhead=7, ax=100, ay=0)]
layout = plotly.graph_objs.Layout(shapes=shapes)
fig = go.Figure([go.Bar(x=list(p_MW_corr), y=num_features, marker={'color': colors}, orientation='h')], layout=layout)
fig.update_yaxes(tickfont=dict(size=7))
fig.update_xaxes(tickfont=dict(size=7))
fig.update_layout(title_text='The p-values for Mann-Whitney test (Bonferroni corrected)', height=((len(num_features) * 20) + 250), width=750, xaxis_type='log', annotations=annotations, xaxis={'mirror': 'allticks', 'side': 'top', 'dtick': 1, 'showgrid': True})
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_MW.html'), config={'scrollZoom': True})
else:
print('Outcome column should be presented')
return None
|
def plot_MW_p(self, features_to_plot: list=[], binary_classes_to_plot: list=[], p_threshold: float=0.05):
'Plot two-sided Mann-Whitney U test p-values for comparison of features values means in 2 classes (with correction for multiple testing) into interactive .html report.\n\n Arguments:\n features_to_plot: List of specific features to be selected (otherwise selects all the numerical features).\n binary_classes_to_plot: List, containing 2 classes of interest, if the dataset is multi-class.\n p_threshold: Significance level.\n '
if (len(self._outcome) > 0):
if (len(self._class_label) == 2):
binary_classes_to_plot = self._class_label
elif (len(binary_classes_to_plot) != 2):
print('Only binary class labels are supported.')
return
elif (not ((binary_classes_to_plot[0] in self._class_label) & (binary_classes_to_plot[1] in self._class_label))):
print('Wrong class label(s).')
return
if (not features_to_plot):
features_to_plot = self._feature_column
num_features = []
for feature in features_to_plot:
if (self._feature_dataframe[feature].dtype != 'object'):
num_features.append(feature)
p_MW_corr = self.__get_MW_p(ftrs=num_features, binary_classes_to_plot=binary_classes_to_plot, p_threshold=p_threshold)
colors = [self.__get_color(v, p_threshold) for v in p_MW_corr]
shapes = [{'type': 'line', 'xref': 'x', 'yref': 'y', 'x0': p_threshold, 'y0': 0, 'x1': p_threshold, 'y1': len(num_features)}]
annotations = [go.layout.Annotation(x=math.log10(p_threshold), y=len(num_features), text=('alpha=' + str(p_threshold)), showarrow=True, arrowhead=7, ax=100, ay=0)]
layout = plotly.graph_objs.Layout(shapes=shapes)
fig = go.Figure([go.Bar(x=list(p_MW_corr), y=num_features, marker={'color': colors}, orientation='h')], layout=layout)
fig.update_yaxes(tickfont=dict(size=7))
fig.update_xaxes(tickfont=dict(size=7))
fig.update_layout(title_text='The p-values for Mann-Whitney test (Bonferroni corrected)', height=((len(num_features) * 20) + 250), width=750, xaxis_type='log', annotations=annotations, xaxis={'mirror': 'allticks', 'side': 'top', 'dtick': 1, 'showgrid': True})
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_MW.html'), config={'scrollZoom': True})
else:
print('Outcome column should be presented')
return None<|docstring|>Plot two-sided Mann-Whitney U test p-values for comparison of features values means in 2 classes (with correction for multiple testing) into interactive .html report.
Arguments:
features_to_plot: List of specific features to be selected (otherwise selects all the numerical features).
binary_classes_to_plot: List, containing 2 classes of interest, if the dataset is multi-class.
p_threshold: Significance level.<|endoftext|>
|
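The statistic behind plot_MW_p is a two-sided Mann-Whitney U test per feature; the private __get_MW_p helper is not shown in this row, so the sketch below assumes a simple Bonferroni correction (multiplying each p-value by the number of tested features), which matches the figure title:

from scipy.stats import mannwhitneyu
# Two made-up feature value groups for the two classes being compared.
group_a = [0.1, 0.4, 0.3, 0.8, 0.2]
group_b = [0.9, 1.1, 0.7, 1.3, 1.0]
n_features = 10  # hypothetical number of features tested (drives the Bonferroni factor)
_, p = mannwhitneyu(group_a, group_b, alternative='two-sided')
print(min(p * n_features, 1.0))  # corrected p-value, capped at 1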
0cb72ce6f0aa7fc26b9e54306d4160bc510ba1015646311d9384c87ce243d014
|
def plot_univariate_roc(self, features_to_plot: list=[], binary_classes_to_plot: list=[], auc_threshold: float=0.75):
'Plot univariate ROC curves (with AUC calculation) for threshold binary classifier, based of each feature separately into interactive .html report.\n\n Arguments:\n features_to_plot: List of specific features to be selected (otherwise selects all the numerical features).\n binary_classes_to_plot: List, containing 2 classes of interest in case of multi-class data.\n auc_threshold: Threshold value for ROC AUC to be highlighted.\n '
if (len(self._outcome) > 0):
if (len(self._class_label) == 2):
binary_classes_to_plot = self._class_label
elif (len(binary_classes_to_plot) != 2):
print('Only binary class labels are supported.')
return
elif (not ((binary_classes_to_plot[0] in self._class_label) & (binary_classes_to_plot[1] in self._class_label))):
print('Wrong class label(s).')
return
if (not features_to_plot):
features_to_plot = self._feature_column
num_features = []
for feature in features_to_plot:
if (self._feature_outcome_dataframe[feature].dtype != 'object'):
num_features.append(feature)
cols = 4
rows = ((len(num_features) // 4) + 1)
num_features_tuple = tuple(num_features)
fig = plotly.subplots.make_subplots(rows=rows, cols=cols, subplot_titles=num_features_tuple)
counter = 0
for feature in num_features:
c = ((counter % 4) + 1)
r = ((counter // 4) + 1)
(fprs, tprs) = self.__get_univar_fprs_tprs(ftr=feature, binary_classes_to_plot=binary_classes_to_plot)
univar_auc = auc(fprs, tprs)
fig.append_trace(go.Scatter(x=fprs, y=tprs, name='ROC', marker={'color': self.__get_color(univar_auc, auc_threshold)}), r, c)
fig.append_trace(go.Scatter(x=[0, 1], y=[0, 1], name='Chance', marker={'color': 'grey'}, mode='lines+markers+text', text=[(' ROC AUC=%0.2f' % univar_auc), ''], textposition='middle right'), r, c)
fig.update_xaxes(title_text='FPR', title_font={'size': 10}, title_standoff=5, row=r, col=c, showgrid=False, zeroline=False)
fig.update_yaxes(title_text='TPR', title_font={'size': 10}, title_standoff=0, row=r, col=c, showgrid=False, zeroline=False)
counter += 1
for i in fig['layout']['annotations']:
i['font'] = dict(size=10)
fig.update_layout(title_text=('Features univariate ROC-curves:' + str(binary_classes_to_plot)), height=(rows * 250), width=1250, showlegend=False)
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_roc-univar.html'), config={'scrollZoom': True})
else:
print('Outcome column should be presented')
return None
|
Plot univariate ROC curves (with AUC calculation) for threshold binary classifier, based of each feature separately into interactive .html report.
Arguments:
features_to_plot: List of specific features to be selected (otherwise selects all the numerical features).
binary_classes_to_plot: List, containing 2 classes of interest in case of multi-class data.
auc_threshold: Threshold value for ROC AUC to be highlighted.
|
pmtool/AnalysisBox.py
|
plot_univariate_roc
|
primakov/Precision-medicine-toolbox
| 23 |
python
|
def plot_univariate_roc(self, features_to_plot: list=[], binary_classes_to_plot: list=[], auc_threshold: float=0.75):
'Plot univariate ROC curves (with AUC calculation) for threshold binary classifier, based of each feature separately into interactive .html report.\n\n Arguments:\n features_to_plot: List of specific features to be selected (otherwise selects all the numerical features).\n binary_classes_to_plot: List, containing 2 classes of interest in case of multi-class data.\n auc_threshold: Threshold value for ROC AUC to be highlighted.\n '
if (len(self._outcome) > 0):
if (len(self._class_label) == 2):
binary_classes_to_plot = self._class_label
elif (len(binary_classes_to_plot) != 2):
print('Only binary class labels are supported.')
return
elif (not ((binary_classes_to_plot[0] in self._class_label) & (binary_classes_to_plot[1] in self._class_label))):
print('Wrong class label(s).')
return
if (not features_to_plot):
features_to_plot = self._feature_column
num_features = []
for feature in features_to_plot:
if (self._feature_outcome_dataframe[feature].dtype != 'object'):
num_features.append(feature)
cols = 4
rows = ((len(num_features) // 4) + 1)
num_features_tuple = tuple(num_features)
fig = plotly.subplots.make_subplots(rows=rows, cols=cols, subplot_titles=num_features_tuple)
counter = 0
for feature in num_features:
c = ((counter % 4) + 1)
r = ((counter // 4) + 1)
(fprs, tprs) = self.__get_univar_fprs_tprs(ftr=feature, binary_classes_to_plot=binary_classes_to_plot)
univar_auc = auc(fprs, tprs)
fig.append_trace(go.Scatter(x=fprs, y=tprs, name='ROC', marker={'color': self.__get_color(univar_auc, auc_threshold)}), r, c)
fig.append_trace(go.Scatter(x=[0, 1], y=[0, 1], name='Chance', marker={'color': 'grey'}, mode='lines+markers+text', text=[(' ROC AUC=%0.2f' % univar_auc), ], textposition='middle right'), r, c)
fig.update_xaxes(title_text='FPR', title_font={'size': 10}, title_standoff=5, row=r, col=c, showgrid=False, zeroline=False)
fig.update_yaxes(title_text='TPR', title_font={'size': 10}, title_standoff=0, row=r, col=c, showgrid=False, zeroline=False)
counter += 1
for i in fig['layout']['annotations']:
i['font'] = dict(size=10)
fig.update_layout(title_text=('Features univariate ROC-curves:' + str(binary_classes_to_plot)), height=(rows * 250), width=1250, showlegend=False)
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_roc-univar.html'), config={'scrollZoom': True})
else:
print('Outcome column should be presented')
return None
|
def plot_univariate_roc(self, features_to_plot: list=[], binary_classes_to_plot: list=[], auc_threshold: float=0.75):
'Plot univariate ROC curves (with AUC calculation) for threshold binary classifier, based of each feature separately into interactive .html report.\n\n Arguments:\n features_to_plot: List of specific features to be selected (otherwise selects all the numerical features).\n binary_classes_to_plot: List, containing 2 classes of interest in case of multi-class data.\n auc_threshold: Threshold value for ROC AUC to be highlighted.\n '
if (len(self._outcome) > 0):
if (len(self._class_label) == 2):
binary_classes_to_plot = self._class_label
elif (len(binary_classes_to_plot) != 2):
print('Only binary class labels are supported.')
return
elif (not ((binary_classes_to_plot[0] in self._class_label) & (binary_classes_to_plot[1] in self._class_label))):
print('Wrong class label(s).')
return
if (not features_to_plot):
features_to_plot = self._feature_column
num_features = []
for feature in features_to_plot:
if (self._feature_outcome_dataframe[feature].dtype != 'object'):
num_features.append(feature)
cols = 4
rows = ((len(num_features) // 4) + 1)
num_features_tuple = tuple(num_features)
fig = plotly.subplots.make_subplots(rows=rows, cols=cols, subplot_titles=num_features_tuple)
counter = 0
for feature in num_features:
c = ((counter % 4) + 1)
r = ((counter // 4) + 1)
(fprs, tprs) = self.__get_univar_fprs_tprs(ftr=feature, binary_classes_to_plot=binary_classes_to_plot)
univar_auc = auc(fprs, tprs)
fig.append_trace(go.Scatter(x=fprs, y=tprs, name='ROC', marker={'color': self.__get_color(univar_auc, auc_threshold)}), r, c)
fig.append_trace(go.Scatter(x=[0, 1], y=[0, 1], name='Chance', marker={'color': 'grey'}, mode='lines+markers+text', text=[(' ROC AUC=%0.2f' % univar_auc), ], textposition='middle right'), r, c)
fig.update_xaxes(title_text='FPR', title_font={'size': 10}, title_standoff=5, row=r, col=c, showgrid=False, zeroline=False)
fig.update_yaxes(title_text='TPR', title_font={'size': 10}, title_standoff=0, row=r, col=c, showgrid=False, zeroline=False)
counter += 1
for i in fig['layout']['annotations']:
i['font'] = dict(size=10)
fig.update_layout(title_text=('Features univariate ROC-curves:' + str(binary_classes_to_plot)), height=(rows * 250), width=1250, showlegend=False)
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_roc-univar.html'), config={'scrollZoom': True})
else:
print('Outcome column should be presented')
return None<|docstring|>Plot univariate ROC curves (with AUC calculation) for threshold binary classifier, based of each feature separately into interactive .html report.
Arguments:
features_to_plot: List of specific features to be selected (otherwise selects all the numerical features).
binary_classes_to_plot: List, containing 2 classes of interest in case of multi-class data.
auc_threshold: Threshold value for ROC AUC to be highlighted.<|endoftext|>
|
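A minimal sketch of the per-feature ROC/AUC computation that `plot_univariate_roc` wraps, using scikit-learn's `roc_curve` and `auc` on synthetic data; the feature values and labels below are illustrative assumptions, and the 0.75 highlight threshold simply mirrors the method's default `auc_threshold`.

```python
# Illustrative only: synthetic feature/outcome data, scikit-learn assumed installed.
import numpy as np
from sklearn.metrics import roc_curve, auc

rng = np.random.default_rng(0)
labels = rng.integers(0, 2, size=200)            # binary outcome
feature = labels * 0.8 + rng.normal(size=200)    # numeric feature loosely tied to outcome

fprs, tprs, _ = roc_curve(labels, feature)       # threshold directly on the feature values
univar_auc = auc(fprs, tprs)

auc_threshold = 0.75                             # same default as the method signature
highlighted = univar_auc >= auc_threshold
print(f"univariate AUC = {univar_auc:.2f} (highlighted: {highlighted})")
```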
66433115cddbdc9dc179b7a3f1550e7aad0f13e8ec5c4dc7f89b2e2aea901d07
|
def calculate_basic_stats(self, volume_feature: str=''):
"Calculate basic statistical scores (such as: number of missing values, mean, std, min, max, Mann-Whitney test p-values for binary classes, univariate ROC AUC for binary classes, Spearman's correlation with volume if volumetric feature name is sent to function) for each feature and save it to .csv file.\n\n Arguments:\n volume_feature: Name of the feature, which is considered as volume.\n "
num_features = []
for feature in self._feature_column:
if (self._feature_dataframe[feature].dtype != 'object'):
num_features.append(feature)
frame = {'NaN': self._feature_dataframe[num_features].isnull().sum(axis=0), 'Mean': self._feature_dataframe[num_features].mean(axis=0), 'Std': self._feature_dataframe[num_features].std(axis=0), 'Min': self._feature_dataframe[num_features].min(axis=0), 'Max': self._feature_dataframe[num_features].max(axis=0)}
stats_dataframe = pd.DataFrame(frame)
if (len(self._outcome) > 0):
if (len(self._class_label) == 2):
p_MW_corr = self.__get_MW_p(ftrs=num_features, binary_classes_to_plot=self._class_label)
univar_auc = []
for feature in num_features:
(fprs, tprs) = self.__get_univar_fprs_tprs(ftr=feature, binary_classes_to_plot=self._class_label)
univar_auc.append(auc(fprs, tprs))
stats_dataframe_ext = pd.DataFrame({'p_MW_corrected': p_MW_corr, 'univar_auc': univar_auc}, index=num_features)
stats_dataframe = pd.concat([stats_dataframe, stats_dataframe_ext], axis=1)
if volume_feature:
vol_corr = []
for feature in num_features:
vol_corr.append(sp.stats.spearmanr(self._feature_dataframe[feature], self._feature_dataframe[volume_feature])[0])
stats_dataframe = pd.concat([stats_dataframe, pd.DataFrame({'volume_corr': vol_corr}, index=num_features)], axis=1)
stats_dataframe.to_excel((os.path.splitext(self._feature_path)[0] + '_basic_stats.xlsx'))
return None
|
Calculate basic statistical scores (such as: number of missing values, mean, std, min, max, Mann-Whitney test p-values for binary classes, univariate ROC AUC for binary classes, Spearman's correlation with volume if volumetric feature name is sent to function) for each feature and save it to .csv file.
Arguments:
volume_feature: Name of the feature, which is considered as volume.
|
pmtool/AnalysisBox.py
|
calculate_basic_stats
|
primakov/Precision-medicine-toolbox
| 23 |
python
|
def calculate_basic_stats(self, volume_feature: str=''):
"Calculate basic statistical scores (such as: number of missing values, mean, std, min, max, Mann-Whitney test p-values for binary classes, univariate ROC AUC for binary classes, Spearman's correlation with volume if volumetric feature name is sent to function) for each feature and save it to .csv file.\n\n Arguments:\n volume_feature: Name of the feature, which is considered as volume.\n "
num_features = []
for feature in self._feature_column:
if (self._feature_dataframe[feature].dtype != 'object'):
num_features.append(feature)
frame = {'NaN': self._feature_dataframe[num_features].isnull().sum(axis=0), 'Mean': self._feature_dataframe[num_features].mean(axis=0), 'Std': self._feature_dataframe[num_features].std(axis=0), 'Min': self._feature_dataframe[num_features].min(axis=0), 'Max': self._feature_dataframe[num_features].max(axis=0)}
stats_dataframe = pd.DataFrame(frame)
if (len(self._outcome) > 0):
if (len(self._class_label) == 2):
p_MW_corr = self.__get_MW_p(ftrs=num_features, binary_classes_to_plot=self._class_label)
univar_auc = []
for feature in num_features:
(fprs, tprs) = self.__get_univar_fprs_tprs(ftr=feature, binary_classes_to_plot=self._class_label)
univar_auc.append(auc(fprs, tprs))
stats_dataframe_ext = pd.DataFrame({'p_MW_corrected': p_MW_corr, 'univar_auc': univar_auc}, index=num_features)
stats_dataframe = pd.concat([stats_dataframe, stats_dataframe_ext], axis=1)
if volume_feature:
vol_corr = []
for feature in num_features:
vol_corr.append(sp.stats.spearmanr(self._feature_dataframe[feature], self._feature_dataframe[volume_feature])[0])
stats_dataframe = pd.concat([stats_dataframe, pd.DataFrame({'volume_corr': vol_corr}, index=num_features)], axis=1)
stats_dataframe.to_excel((os.path.splitext(self._feature_path)[0] + '_basic_stats.xlsx'))
return None
|
def calculate_basic_stats(self, volume_feature: str=''):
"Calculate basic statistical scores (such as: number of missing values, mean, std, min, max, Mann-Whitney test p-values for binary classes, univariate ROC AUC for binary classes, Spearman's correlation with volume if volumetric feature name is sent to function) for each feature and save it to .csv file.\n\n Arguments:\n volume_feature: Name of the feature, which is considered as volume.\n "
num_features = []
for feature in self._feature_column:
if (self._feature_dataframe[feature].dtype != 'object'):
num_features.append(feature)
frame = {'NaN': self._feature_dataframe[num_features].isnull().sum(axis=0), 'Mean': self._feature_dataframe[num_features].mean(axis=0), 'Std': self._feature_dataframe[num_features].std(axis=0), 'Min': self._feature_dataframe[num_features].min(axis=0), 'Max': self._feature_dataframe[num_features].max(axis=0)}
stats_dataframe = pd.DataFrame(frame)
if (len(self._outcome) > 0):
if (len(self._class_label) == 2):
p_MW_corr = self.__get_MW_p(ftrs=num_features, binary_classes_to_plot=self._class_label)
univar_auc = []
for feature in num_features:
(fprs, tprs) = self.__get_univar_fprs_tprs(ftr=feature, binary_classes_to_plot=self._class_label)
univar_auc.append(auc(fprs, tprs))
stats_dataframe_ext = pd.DataFrame({'p_MW_corrected': p_MW_corr, 'univar_auc': univar_auc}, index=num_features)
stats_dataframe = pd.concat([stats_dataframe, stats_dataframe_ext], axis=1)
if volume_feature:
vol_corr = []
for feature in num_features:
vol_corr.append(sp.stats.spearmanr(self._feature_dataframe[feature], self._feature_dataframe[volume_feature])[0])
stats_dataframe = pd.concat([stats_dataframe, pd.DataFrame({'volume_corr': vol_corr}, index=num_features)], axis=1)
stats_dataframe.to_excel((os.path.splitext(self._feature_path)[0] + '_basic_stats.xlsx'))
return None<|docstring|>Calculate basic statistical scores (such as: number of missing values, mean, std, min, max, Mann-Whitney test p-values for binary classes, univariate ROC AUC for binary classes, Spearman's correlation with volume if volumetric feature name is sent to function) for each feature and save it to .csv file.
Arguments:
volume_feature: Name of the feature, which is considered as volume.<|endoftext|>
|
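A small, self-contained sketch of the per-feature scores that `calculate_basic_stats` collects (NaN count, mean/std/min/max, a Mann-Whitney p-value for two classes, Spearman correlation with a volume column), recomputed with pandas and scipy on made-up data; the column names are placeholders and the p-value here is uncorrected, unlike the `p_MW_corrected` column written by the method.

```python
# Illustrative recomputation on toy data (pandas + scipy assumed available);
# not the toolbox's exact pipeline.
import numpy as np
import pandas as pd
from scipy import stats

rng = np.random.default_rng(1)
df = pd.DataFrame({
    "feature_a": rng.normal(size=100),
    "volume": rng.lognormal(size=100),
    "outcome": rng.integers(0, 2, size=100),
})

col = df["feature_a"]
summary = {
    "NaN": col.isnull().sum(),
    "Mean": col.mean(), "Std": col.std(),
    "Min": col.min(), "Max": col.max(),
}
# Mann-Whitney U test between the two outcome groups (uncorrected p-value here)
grp0 = col[df["outcome"] == 0]
grp1 = col[df["outcome"] == 1]
summary["p_MW"] = stats.mannwhitneyu(grp0, grp1).pvalue
# Spearman correlation with the volume-like column
summary["volume_corr"] = stats.spearmanr(col, df["volume"])[0]
print(summary)
```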
8841035a322029721371d55cd2a86cee91338ebbb806770d7ac0ee10e269d211
|
def volume_analysis(self, volume_feature: str='', auc_threshold: float=0.75, features_to_plot: list=[], corr_threshold: float=0.75):
'Calculate features correlation (Spearman’s) with volume and plot volume-based precision-recall curve.\n\n Arguments:\n volume_feature: Name of the feature, which is considered as volume.\n auc_threshold: Threshold value for area under precision-recall curve to be highlighted.\n features_to_plot: Specific features to be selected (otherwise selects all the numerical features)\n corr_threshold: Threshold value for absolute value for Spearman’s correlation coefficient to be considered as ‘strong correlation’.\n '
if volume_feature:
if (volume_feature in self._feature_column):
if (len(self._outcome) > 0):
if (len(self._class_label) == 2):
(precs, recs) = self.__get_univar_prec_rec(ftr=volume_feature)
univar_auc = auc(recs, precs)
fig = go.Figure()
fig.add_trace(go.Scatter(x=recs, y=precs, name='PRC', marker={'color': self.__get_color(univar_auc, auc_threshold)}))
fig.add_trace(go.Scatter(x=[0], y=[0], name='AUC', marker={'color': 'grey'}, mode='text', text=[('AUC=%0.2f' % univar_auc), ''], textposition='top right'))
fig.update_xaxes(title_text='recall', title_font={'size': 10}, title_standoff=5, showgrid=True, zeroline=False, range=[0, 1])
fig.update_yaxes(title_text='precision', title_font={'size': 10}, title_standoff=5, showgrid=True, zeroline=False, range=[0, 1])
fig.update_layout(title_text='Volume precision-recall curve', showlegend=False, width=500, height=500)
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_volume_PRC.html'), config={'scrollZoom': True})
if (not features_to_plot):
features_to_plot = self._feature_column
num_features = []
corr_vol = []
for feature in features_to_plot:
if (self._feature_outcome_dataframe[feature].dtype != 'object'):
num_features.append(feature)
corr_vol.append(sp.stats.spearmanr(self._feature_dataframe[feature], self._feature_dataframe[volume_feature])[0])
colors = [self.__get_color(v, corr_threshold) for v in corr_vol]
shapes = [{'type': 'line', 'xref': 'x', 'yref': 'y', 'x0': corr_threshold, 'y0': 0, 'x1': corr_threshold, 'y1': len(num_features)}]
annotations = [go.layout.Annotation(x=corr_threshold, y=len(num_features), text=('r_S=' + str(corr_threshold)), showarrow=True, arrowhead=7, ax=100, ay=0)]
layout = plotly.graph_objs.Layout(shapes=shapes)
fig = go.Figure(layout=layout)
fig.add_trace(go.Bar(x=list(np.abs(corr_vol)), y=num_features, marker={'color': colors}, orientation='h', name='Spearman correlation'))
fig.update_yaxes(tickfont=dict(size=7))
fig.update_xaxes(tickfont=dict(size=7))
fig.update_layout(annotations=annotations, xaxis={'mirror': 'allticks', 'side': 'top', 'dtick': 1, 'showgrid': True}, title_text='Volume Spearman correlation', showlegend=False, width=750, height=((len(num_features) * 20) + 250))
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_volume_corr.html'), config={'scrollZoom': True})
return None
|
Calculate features correlation (Spearman’s) with volume and plot volume-based precision-recall curve.
Arguments:
volume_feature: Name of the feature, which is considered as volume.
auc_threshold: Threshold value for area under precision-recall curve to be highlighted.
features_to_plot: Specific features to be selected (otherwise selects all the numerical features)
corr_threshold: Threshold value for absolute value for Spearman’s correlation coefficient to be considered as ‘strong correlation’.
|
pmtool/AnalysisBox.py
|
volume_analysis
|
primakov/Precision-medicine-toolbox
| 23 |
python
|
def volume_analysis(self, volume_feature: str='', auc_threshold: float=0.75, features_to_plot: list=[], corr_threshold: float=0.75):
'Calculate features correlation (Spearman’s) with volume and plot volume-based precision-recall curve.\n\n Arguments:\n volume_feature: Name of the feature, which is considered as volume.\n auc_threshold: Threshold value for area under precision-recall curve to be highlighted.\n features_to_plot: Specific features to be selected (otherwise selects all the numerical features)\n corr_threshold: Threshold value for absolute value for Spearman’s correlation coefficient to be considered as ‘strong correlation’.\n '
if volume_feature:
if (volume_feature in self._feature_column):
if (len(self._outcome) > 0):
if (len(self._class_label) == 2):
(precs, recs) = self.__get_univar_prec_rec(ftr=volume_feature)
univar_auc = auc(recs, precs)
fig = go.Figure()
fig.add_trace(go.Scatter(x=recs, y=precs, name='PRC', marker={'color': self.__get_color(univar_auc, auc_threshold)}))
fig.add_trace(go.Scatter(x=[0], y=[0], name='AUC', marker={'color': 'grey'}, mode='text', text=[('AUC=%0.2f' % univar_auc), ], textposition='top right'))
fig.update_xaxes(title_text='recall', title_font={'size': 10}, title_standoff=5, showgrid=True, zeroline=False, range=[0, 1])
fig.update_yaxes(title_text='precision', title_font={'size': 10}, title_standoff=5, showgrid=True, zeroline=False, range=[0, 1])
fig.update_layout(title_text='Volume precision-recall curve', showlegend=False, width=500, height=500)
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_volume_PRC.html'), config={'scrollZoom': True})
if (not features_to_plot):
features_to_plot = self._feature_column
num_features = []
corr_vol = []
for feature in features_to_plot:
if (self._feature_outcome_dataframe[feature].dtype != 'object'):
num_features.append(feature)
corr_vol.append(sp.stats.spearmanr(self._feature_dataframe[feature], self._feature_dataframe[volume_feature])[0])
colors = [self.__get_color(v, corr_threshold) for v in corr_vol]
shapes = [{'type': 'line', 'xref': 'x', 'yref': 'y', 'x0': corr_threshold, 'y0': 0, 'x1': corr_threshold, 'y1': len(num_features)}]
annotations = [go.layout.Annotation(x=corr_threshold, y=len(num_features), text=('r_S=' + str(corr_threshold)), showarrow=True, arrowhead=7, ax=100, ay=0)]
layout = plotly.graph_objs.Layout(shapes=shapes)
fig = go.Figure(layout=layout)
fig.add_trace(go.Bar(x=list(np.abs(corr_vol)), y=num_features, marker={'color': colors}, orientation='h', name='Spearman correlation'))
fig.update_yaxes(tickfont=dict(size=7))
fig.update_xaxes(tickfont=dict(size=7))
fig.update_layout(annotations=annotations, xaxis={'mirror': 'allticks', 'side': 'top', 'dtick': 1, 'showgrid': True}, title_text='Volume Spearman correlation', showlegend=False, width=750, height=((len(num_features) * 20) + 250))
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_volume_corr.html'), config={'scrollZoom': True})
return None
|
def volume_analysis(self, volume_feature: str='', auc_threshold: float=0.75, features_to_plot: list=[], corr_threshold: float=0.75):
'Calculate features correlation (Spearman’s) with volume and plot volume-based precision-recall curve.\n\n Arguments:\n volume_feature: Name of the feature, which is considered as volume.\n auc_threshold: Threshold value for area under precision-recall curve to be highlighted.\n features_to_plot: Specific features to be selected (otherwise selects all the numerical features)\n corr_threshold: Threshold value for absolute value for Spearman’s correlation coefficient to be considered as ‘strong correlation’.\n '
if volume_feature:
if (volume_feature in self._feature_column):
if (len(self._outcome) > 0):
if (len(self._class_label) == 2):
(precs, recs) = self.__get_univar_prec_rec(ftr=volume_feature)
univar_auc = auc(recs, precs)
fig = go.Figure()
fig.add_trace(go.Scatter(x=recs, y=precs, name='PRC', marker={'color': self.__get_color(univar_auc, auc_threshold)}))
fig.add_trace(go.Scatter(x=[0], y=[0], name='AUC', marker={'color': 'grey'}, mode='text', text=[('AUC=%0.2f' % univar_auc), ], textposition='top right'))
fig.update_xaxes(title_text='recall', title_font={'size': 10}, title_standoff=5, showgrid=True, zeroline=False, range=[0, 1])
fig.update_yaxes(title_text='precision', title_font={'size': 10}, title_standoff=5, showgrid=True, zeroline=False, range=[0, 1])
fig.update_layout(title_text='Volume precision-recall curve', showlegend=False, width=500, height=500)
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_volume_PRC.html'), config={'scrollZoom': True})
if (not features_to_plot):
features_to_plot = self._feature_column
num_features = []
corr_vol = []
for feature in features_to_plot:
if (self._feature_outcome_dataframe[feature].dtype != 'object'):
num_features.append(feature)
corr_vol.append(sp.stats.spearmanr(self._feature_dataframe[feature], self._feature_dataframe[volume_feature])[0])
colors = [self.__get_color(v, corr_threshold) for v in corr_vol]
shapes = [{'type': 'line', 'xref': 'x', 'yref': 'y', 'x0': corr_threshold, 'y0': 0, 'x1': corr_threshold, 'y1': len(num_features)}]
annotations = [go.layout.Annotation(x=corr_threshold, y=len(num_features), text=('r_S=' + str(corr_threshold)), showarrow=True, arrowhead=7, ax=100, ay=0)]
layout = plotly.graph_objs.Layout(shapes=shapes)
fig = go.Figure(layout=layout)
fig.add_trace(go.Bar(x=list(np.abs(corr_vol)), y=num_features, marker={'color': colors}, orientation='h', name='Spearman correlation'))
fig.update_yaxes(tickfont=dict(size=7))
fig.update_xaxes(tickfont=dict(size=7))
fig.update_layout(annotations=annotations, xaxis={'mirror': 'allticks', 'side': 'top', 'dtick': 1, 'showgrid': True}, title_text='Volume Spearman correlation', showlegend=False, width=750, height=((len(num_features) * 20) + 250))
plotly.offline.plot(fig, filename=(os.path.splitext(self._feature_path)[0] + '_volume_corr.html'), config={'scrollZoom': True})
return None<|docstring|>Calculate features correlation (Spearman’s) with volume and plot volume-based precision-recall curve.
Arguments:
volume_feature: Name of the feature, which is considered as volume.
auc_threshold: Threshold value for area under precision-recall curve to be highlighted.
features_to_plot: Specific features to be selected (otherwise selects all the numerical features)
corr_threshold: Threshold value for absolute value for Spearman’s correlation coefficient to be considered as ‘strong correlation’.<|endoftext|>
|
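For the precision-recall half of `volume_analysis`, a hedged sketch using scikit-learn's `precision_recall_curve` with the same `auc(recall, precision)` call as in the method; the volume values and labels are synthetic stand-ins.

```python
# Sketch of the volume precision-recall check on toy data; values are invented.
import numpy as np
from sklearn.metrics import precision_recall_curve, auc

rng = np.random.default_rng(2)
labels = rng.integers(0, 2, size=150)
volume = labels * 1.5 + rng.lognormal(size=150)   # volume-like score

precs, recs, _ = precision_recall_curve(labels, volume)
pr_auc = auc(recs, precs)                         # area under the PR curve
print(f"volume PR AUC = {pr_auc:.2f}")
```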
1b793271f5f01111812f7fd44d44b71d6c2241f08909e7a5d491ad371a9dd4ab
|
def __getitem__(self, efuse_name):
' Return the efuse field with the given name '
for e in self.efuses:
if (efuse_name == e.name):
return e
new_fields = False
for efuse in self._CUSTOM_MAC:
if (efuse[0] == efuse_name):
self.efuses += [EfuseField.from_tuple(self, efuse, efuse[8]) for efuse in self._CUSTOM_MAC]
new_fields = True
for efuse in self._ADC_CALIBRATION:
if (efuse[0] == efuse_name):
self.efuses += [EfuseField.from_tuple(self, efuse, efuse[8]) for efuse in self._ADC_CALIBRATION]
new_fields = True
if new_fields:
for e in self.efuses:
if (efuse_name == e.name):
return e
raise KeyError
|
Return the efuse field with the given name
|
MicroPython/esptool-master/espressif/efuse/esp32/fields.py
|
__getitem__
|
hu-tianyi/AuTrix
| 1 |
python
|
def __getitem__(self, efuse_name):
' '
for e in self.efuses:
if (efuse_name == e.name):
return e
new_fields = False
for efuse in self._CUSTOM_MAC:
if (efuse[0] == efuse_name):
self.efuses += [EfuseField.from_tuple(self, efuse, efuse[8]) for efuse in self._CUSTOM_MAC]
new_fields = True
for efuse in self._ADC_CALIBRATION:
if (efuse[0] == efuse_name):
self.efuses += [EfuseField.from_tuple(self, efuse, efuse[8]) for efuse in self._ADC_CALIBRATION]
new_fields = True
if new_fields:
for e in self.efuses:
if (efuse_name == e.name):
return e
raise KeyError
|
def __getitem__(self, efuse_name):
' '
for e in self.efuses:
if (efuse_name == e.name):
return e
new_fields = False
for efuse in self._CUSTOM_MAC:
if (efuse[0] == efuse_name):
self.efuses += [EfuseField.from_tuple(self, efuse, efuse[8]) for efuse in self._CUSTOM_MAC]
new_fields = True
for efuse in self._ADC_CALIBRATION:
if (efuse[0] == efuse_name):
self.efuses += [EfuseField.from_tuple(self, efuse, efuse[8]) for efuse in self._ADC_CALIBRATION]
new_fields = True
if new_fields:
for e in self.efuses:
if (efuse_name == e.name):
return e
raise KeyError<|docstring|>Return the efuse field with the given name<|endoftext|>
|
f24c0686e0c2b1ef85a45c65c7e4e176af87c24595277ca41351be7c74b37576
|
def write_efuses(self, block):
' Write the values in the efuse write registers to\n the efuse hardware, then refresh the efuse read registers.\n '
apb_freq = self._esp.get_crystal_freq()
(clk_sel0, clk_sel1, dac_clk_div) = self.EFUSE_CLK_SETTINGS[apb_freq]
self.update_reg(self.EFUSE_DAC_CONF_REG, self.EFUSE_DAC_CLK_DIV_MASK, dac_clk_div)
self.update_reg(self.EFUSE_CLK_REG, self.EFUSE_CLK_SEL0_MASK, clk_sel0)
self.update_reg(self.EFUSE_CLK_REG, self.EFUSE_CLK_SEL1_MASK, clk_sel1)
self.write_reg(self.EFUSE_REG_CONF, self.EFUSE_CONF_WRITE)
self.write_reg(self.EFUSE_REG_CMD, self.EFUSE_CMD_WRITE)
def wait_idle():
deadline = (time.time() + self.EFUSE_BURN_TIMEOUT)
while (time.time() < deadline):
if (self.read_reg(self.EFUSE_REG_CMD) == 0):
return
raise esptool.FatalError('Timed out waiting for Efuse controller command to complete')
wait_idle()
self.write_reg(self.EFUSE_REG_CONF, self.EFUSE_CONF_READ)
self.write_reg(self.EFUSE_REG_CMD, self.EFUSE_CMD_READ)
wait_idle()
return self.get_coding_scheme_warnings()
|
Write the values in the efuse write registers to
the efuse hardware, then refresh the efuse read registers.
|
MicroPython/esptool-master/espressif/efuse/esp32/fields.py
|
write_efuses
|
hu-tianyi/AuTrix
| 1 |
python
|
def write_efuses(self, block):
' Write the values in the efuse write registers to\n the efuse hardware, then refresh the efuse read registers.\n '
apb_freq = self._esp.get_crystal_freq()
(clk_sel0, clk_sel1, dac_clk_div) = self.EFUSE_CLK_SETTINGS[apb_freq]
self.update_reg(self.EFUSE_DAC_CONF_REG, self.EFUSE_DAC_CLK_DIV_MASK, dac_clk_div)
self.update_reg(self.EFUSE_CLK_REG, self.EFUSE_CLK_SEL0_MASK, clk_sel0)
self.update_reg(self.EFUSE_CLK_REG, self.EFUSE_CLK_SEL1_MASK, clk_sel1)
self.write_reg(self.EFUSE_REG_CONF, self.EFUSE_CONF_WRITE)
self.write_reg(self.EFUSE_REG_CMD, self.EFUSE_CMD_WRITE)
def wait_idle():
deadline = (time.time() + self.EFUSE_BURN_TIMEOUT)
while (time.time() < deadline):
if (self.read_reg(self.EFUSE_REG_CMD) == 0):
return
raise esptool.FatalError('Timed out waiting for Efuse controller command to complete')
wait_idle()
self.write_reg(self.EFUSE_REG_CONF, self.EFUSE_CONF_READ)
self.write_reg(self.EFUSE_REG_CMD, self.EFUSE_CMD_READ)
wait_idle()
return self.get_coding_scheme_warnings()
|
def write_efuses(self, block):
' Write the values in the efuse write registers to\n the efuse hardware, then refresh the efuse read registers.\n '
apb_freq = self._esp.get_crystal_freq()
(clk_sel0, clk_sel1, dac_clk_div) = self.EFUSE_CLK_SETTINGS[apb_freq]
self.update_reg(self.EFUSE_DAC_CONF_REG, self.EFUSE_DAC_CLK_DIV_MASK, dac_clk_div)
self.update_reg(self.EFUSE_CLK_REG, self.EFUSE_CLK_SEL0_MASK, clk_sel0)
self.update_reg(self.EFUSE_CLK_REG, self.EFUSE_CLK_SEL1_MASK, clk_sel1)
self.write_reg(self.EFUSE_REG_CONF, self.EFUSE_CONF_WRITE)
self.write_reg(self.EFUSE_REG_CMD, self.EFUSE_CMD_WRITE)
def wait_idle():
deadline = (time.time() + self.EFUSE_BURN_TIMEOUT)
while (time.time() < deadline):
if (self.read_reg(self.EFUSE_REG_CMD) == 0):
return
raise esptool.FatalError('Timed out waiting for Efuse controller command to complete')
wait_idle()
self.write_reg(self.EFUSE_REG_CONF, self.EFUSE_CONF_READ)
self.write_reg(self.EFUSE_REG_CMD, self.EFUSE_CMD_READ)
wait_idle()
return self.get_coding_scheme_warnings()<|docstring|>Write the values in the efuse write registers to
the efuse hardware, then refresh the efuse read registers.<|endoftext|>
|
9c7b579c4a6c6009aa2b65f15b64763e44f8202678007e61811fd62d134df014
|
def get_coding_scheme_warnings(self):
' Check if the coding scheme has detected any errors.\n Meaningless for default coding scheme (0)\n '
return (self.read_reg(self.EFUSE_REG_DEC_STATUS) & self.EFUSE_REG_DEC_STATUS_MASK)
|
Check if the coding scheme has detected any errors.
Meaningless for default coding scheme (0)
|
MicroPython/esptool-master/espressif/efuse/esp32/fields.py
|
get_coding_scheme_warnings
|
hu-tianyi/AuTrix
| 1 |
python
|
def get_coding_scheme_warnings(self):
' Check if the coding scheme has detected any errors.\n Meaningless for default coding scheme (0)\n '
return (self.read_reg(self.EFUSE_REG_DEC_STATUS) & self.EFUSE_REG_DEC_STATUS_MASK)
|
def get_coding_scheme_warnings(self):
' Check if the coding scheme has detected any errors.\n Meaningless for default coding scheme (0)\n '
return (self.read_reg(self.EFUSE_REG_DEC_STATUS) & self.EFUSE_REG_DEC_STATUS_MASK)<|docstring|>Check if the coding scheme has detected any errors.
Meaningless for default coding scheme (0)<|endoftext|>
|
eb0c638bdd60bccb764e9b8857e03c898ced14a9d28e1c7892d962b70f49ada2
|
@staticmethod
def calc_crc(raw_mac):
'\n This algorithm is the equivalent of esp_crc8() in ESP32 ROM code\n\n This is CRC-8 w/ inverted polynomial value 0x8C & initial value 0x00.\n '
result = 0
for b in struct.unpack(('B' * 6), raw_mac):
result ^= b
for _ in range(8):
lsb = (result & 1)
result >>= 1
if (lsb != 0):
result ^= 140
return result
|
This algorithm is the equivalent of esp_crc8() in ESP32 ROM code
This is CRC-8 w/ inverted polynomial value 0x8C & initial value 0x00.
|
MicroPython/esptool-master/espressif/efuse/esp32/fields.py
|
calc_crc
|
hu-tianyi/AuTrix
| 1 |
python
|
@staticmethod
def calc_crc(raw_mac):
'\n This algorithm is the equivalent of esp_crc8() in ESP32 ROM code\n\n This is CRC-8 w/ inverted polynomial value 0x8C & initial value 0x00.\n '
result = 0
for b in struct.unpack(('B' * 6), raw_mac):
result ^= b
for _ in range(8):
lsb = (result & 1)
result >>= 1
if (lsb != 0):
result ^= 140
return result
|
@staticmethod
def calc_crc(raw_mac):
'\n This algorithm is the equivalent of esp_crc8() in ESP32 ROM code\n\n This is CRC-8 w/ inverted polynomial value 0x8C & initial value 0x00.\n '
result = 0
for b in struct.unpack(('B' * 6), raw_mac):
result ^= b
for _ in range(8):
lsb = (result & 1)
result >>= 1
if (lsb != 0):
result ^= 140
return result<|docstring|>This algorithm is the equivalent of esp_crc8() in ESP32 ROM code
This is CRC-8 w/ inverted polynomial value 0x8C & initial value 0x00.<|endoftext|>
|
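A standalone usage check of the CRC-8 routine above (inverted polynomial 0x8C, initial value 0x00) applied to an arbitrary example MAC; the MAC bytes are made up and the printed checksum is simply whatever the routine yields.

```python
# Standalone copy of the CRC-8 routine, mirroring calc_crc above, on a sample MAC.
import struct

def crc8_esp(raw_mac: bytes) -> int:
    result = 0
    for b in struct.unpack('B' * 6, raw_mac):
        result ^= b
        for _ in range(8):
            lsb = result & 1
            result >>= 1
            if lsb:
                result ^= 0x8C
    return result

mac = bytes([0x24, 0x0A, 0xC4, 0x12, 0x34, 0x56])   # example MAC, not a real device
print(hex(crc8_esp(mac)))
```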
996b36bd31bdee0fd5b645ffb739e6f344e8b2acecfa6bc88928d3343e4d50ea
|
def get_secret(the_secret, debugging):
'\n As a security measure, all secrets will be kept in a json file named\n secrets.json. This file will not be managed by the version control\n system, but will be available in our documentation repository or\n as an attached file. The main goal of this is that this file should\n not be viewable by no one except us or our team.\n '
try:
secrets_file = os.path.join(BASE_DIR, 'settings', 'secrets.json')
secretjson = json.load(open(secrets_file))
if debugging:
return secretjson['local'][the_secret]
else:
return secretjson['production'][the_secret]
except Exception as e:
print('Something weird happened while retrieving a secret: {}'.format(e))
sys.exit((- 1))
|
As a security measure, all secrets will be kept in a json file named
secrets.json. This file will not be managed by the version control
system, but will be available in our documentation repository or
as an attached file. The main goal of this is that this file should
not be viewable by no one except us or our team.
|
gizmo/gizmo/settings/test.py
|
get_secret
|
azaleas/gizmo
| 1 |
python
|
def get_secret(the_secret, debugging):
'\n As a security measure, all secrets will be kept in a json file named\n secrets.json. This file will not be managed by the version control\n system, but will be available in our documentation repository or\n as an attached file. The main goal of this is that this file should\n not be viewable by no one except us or our team.\n '
try:
secrets_file = os.path.join(BASE_DIR, 'settings', 'secrets.json')
secretjson = json.load(open(secrets_file))
if debugging:
return secretjson['local'][the_secret]
else:
return secretjson['production'][the_secret]
except Exception as e:
print('Something weird happened while retrieving a secret: {}'.format(e))
sys.exit((- 1))
|
def get_secret(the_secret, debugging):
'\n As a security measure, all secrets will be kept in a json file named\n secrets.json. This file will not be managed by the version control\n system, but will be available in our documentation repository or\n as an attached file. The main goal of this is that this file should\n not be viewable by no one except us or our team.\n '
try:
secrets_file = os.path.join(BASE_DIR, 'settings', 'secrets.json')
secretjson = json.load(open(secrets_file))
if debugging:
return secretjson['local'][the_secret]
else:
return secretjson['production'][the_secret]
except Exception as e:
print('Something weird happened while retrieving a secret: {}'.format(e))
sys.exit((- 1))<|docstring|>As a security measure, all secrets will be kept in a json file named
secrets.json. This file will not be managed by the version control
system, but will be available in our documentation repository or
as an attached file. The main goal of this is that this file should
not be viewable by no one except us or our team.<|endoftext|>
|
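A sketch of the `secrets.json` layout that `get_secret` expects: top-level `local` and `production` objects keyed by secret name. The key names and values below are placeholders, written to a temporary file so the snippet is self-contained.

```python
# Example layout assumed by get_secret(); keys/values are placeholders only.
import json, os, tempfile

example = {
    "local":      {"SECRET_KEY": "dev-only-key",  "DB_PASSWORD": "dev-pass"},
    "production": {"SECRET_KEY": "prod-key-here", "DB_PASSWORD": "prod-pass"},
}
path = os.path.join(tempfile.mkdtemp(), "secrets.json")
with open(path, "w") as fh:
    json.dump(example, fh, indent=2)

with open(path) as fh:
    secrets = json.load(fh)
print(secrets["local"]["SECRET_KEY"])   # what get_secret(..., debugging=True) would return
```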
f0a2b0e588df2a9f838479b6af1276fc234615efdf927947b0a620380ad87a39
|
def weighted_choice(choices):
'\n Given a list of weighted choices, choose one. Choices are a list of\n (weight, element) pairs.\n '
total = sum((w for (w, c) in choices))
r = uniform(0, total)
upto = 0
for (w, c) in choices:
if ((upto + w) >= r):
return c
upto += w
|
Given a list of weighted choices, choose one. Choices are a list of
(weight, element) pairs.
|
py_search/utils.py
|
weighted_choice
|
ctonic/py_search
| 4 |
python
|
def weighted_choice(choices):
'\n Given a list of weighted choices, choose one. Choices are a list of\n (weight, element) pairs.\n '
total = sum((w for (w, c) in choices))
r = uniform(0, total)
upto = 0
for (w, c) in choices:
if ((upto + w) >= r):
return c
upto += w
|
def weighted_choice(choices):
'\n Given a list of weighted choices, choose one. Choices are a list of\n (weight, element) pairs.\n '
total = sum((w for (w, c) in choices))
r = uniform(0, total)
upto = 0
for (w, c) in choices:
if ((upto + w) >= r):
return c
upto += w<|docstring|>Given a list of weighted choices, choose one. Choices are a list of
(weight, element) pairs.<|endoftext|>
|
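A quick roulette-wheel usage check of `weighted_choice` with the `(weight, element)` convention described above; the function body is repeated verbatim so the snippet runs standalone, and the draw counts are approximate.

```python
from collections import Counter
from random import uniform

def weighted_choice(choices):
    total = sum(w for w, c in choices)
    r = uniform(0, total)
    upto = 0
    for w, c in choices:
        if upto + w >= r:
            return c
        upto += w

choices = [(5, "common"), (3, "uncommon"), (1, "rare")]
print(Counter(weighted_choice(choices) for _ in range(9000)))
# roughly 5000 / 3000 / 1000 draws respectively
```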
474ac5c1d06227aa2b06e20a41a6d34a89f937b53bdd6f11ae8bd52fa64432d7
|
def compare_searches(problems, searches):
'\n A function for comparing different search algorithms on different problems.\n\n :param problems: problems to solve.\n :type problems: an iterator of problems (usually a list)\n :param searches: search algorithms to use.\n :type searches: an iterator of search functions (usually a list)\n '
table = []
for problem in problems:
for search in searches:
annotated_problem = AnnotatedProblem(problem)
start_time = timeit.default_timer()
try:
sol = next(search(annotated_problem))
elapsed = (timeit.default_timer() - start_time)
cost = sol.cost()
except StopIteration:
elapsed = (timeit.default_timer() - start_time)
cost = 'Failed'
table.append([problem.__class__.__name__, search.__name__, annotated_problem.goal_tests, annotated_problem.nodes_expanded, annotated_problem.nodes_evaluated, (('%0.3f' % cost) if isinstance(cost, float) else cost), (('%0.4f' % elapsed) if isinstance(elapsed, float) else elapsed)])
print(tabulate(table, headers=['Problem', 'Search Alg', 'Goal Tests', 'Nodes Expanded', 'Nodes Evaluated', 'Solution Cost', 'Runtime'], tablefmt='simple'))
|
A function for comparing different search algorithms on different problems.
:param problems: problems to solve.
:type problems: an iterator of problems (usually a list)
:param searches: search algorithms to use.
:type searches: an iterator of search functions (usually a list)
|
py_search/utils.py
|
compare_searches
|
ctonic/py_search
| 4 |
python
|
def compare_searches(problems, searches):
'\n A function for comparing different search algorithms on different problems.\n\n :param problems: problems to solve.\n :type problems: an iterator of problems (usually a list)\n :param searches: search algorithms to use.\n :type searches: an iterator of search functions (usually a list)\n '
table = []
for problem in problems:
for search in searches:
annotated_problem = AnnotatedProblem(problem)
start_time = timeit.default_timer()
try:
sol = next(search(annotated_problem))
elapsed = (timeit.default_timer() - start_time)
cost = sol.cost()
except StopIteration:
elapsed = (timeit.default_timer() - start_time)
cost = 'Failed'
table.append([problem.__class__.__name__, search.__name__, annotated_problem.goal_tests, annotated_problem.nodes_expanded, annotated_problem.nodes_evaluated, (('%0.3f' % cost) if isinstance(cost, float) else cost), (('%0.4f' % elapsed) if isinstance(elapsed, float) else elapsed)])
print(tabulate(table, headers=['Problem', 'Search Alg', 'Goal Tests', 'Nodes Expanded', 'Nodes Evaluated', 'Solution Cost', 'Runtime'], tablefmt='simple'))
|
def compare_searches(problems, searches):
'\n A function for comparing different search algorithms on different problems.\n\n :param problems: problems to solve.\n :type problems: an iterator of problems (usually a list)\n :param searches: search algorithms to use.\n :type searches: an iterator of search functions (usually a list)\n '
table = []
for problem in problems:
for search in searches:
annotated_problem = AnnotatedProblem(problem)
start_time = timeit.default_timer()
try:
sol = next(search(annotated_problem))
elapsed = (timeit.default_timer() - start_time)
cost = sol.cost()
except StopIteration:
elapsed = (timeit.default_timer() - start_time)
cost = 'Failed'
table.append([problem.__class__.__name__, search.__name__, annotated_problem.goal_tests, annotated_problem.nodes_expanded, annotated_problem.nodes_evaluated, (('%0.3f' % cost) if isinstance(cost, float) else cost), (('%0.4f' % elapsed) if isinstance(elapsed, float) else elapsed)])
print(tabulate(table, headers=['Problem', 'Search Alg', 'Goal Tests', 'Nodes Expanded', 'Nodes Evaluated', 'Solution Cost', 'Runtime'], tablefmt='simple'))<|docstring|>A function for comparing different search algorithms on different problems.
:param problems: problems to solve.
:type problems: an iterator of problems (usually a list)
:param searches: search algorithms to use.
:type searches: an iterator of search functions (usually a list)<|endoftext|>
|
d4eacdcbb914f1767dac8803a99fa17c1ce101ca7f5fd3b4c1c7afd21cfa4042
|
def timefun(f):
'\n A decorator function for calling Timer with autorange on the provided\n function.\n '
@wraps(f)
def wrapper(*args, **kwds):
try:
result = timeit.Timer(partial(f, *args, **kwds)).autorange()
except Exception:
result = [10, timeit.Timer(partial(f, *args, **kwds)).timeit(10)]
a = [a for a in args]
a += [('%s=%s' % (k, kwds[k])) for k in kwds]
print(('Timing %s%s: %0.7f (num runs=%i)' % (f.__name__, tuple(a), result[1], result[0])))
return f(*args, **kwds)
return wrapper
|
A decorator function for calling Timer with autorange on the provided
function.
|
py_search/utils.py
|
timefun
|
ctonic/py_search
| 4 |
python
|
def timefun(f):
'\n A decorator function for calling Timer with autorange on the provided\n function.\n '
@wraps(f)
def wrapper(*args, **kwds):
try:
result = timeit.Timer(partial(f, *args, **kwds)).autorange()
except Exception:
result = [10, timeit.Timer(partial(f, *args, **kwds)).timeit(10)]
a = [a for a in args]
a += [('%s=%s' % (k, kwds[k])) for k in kwds]
print(('Timing %s%s: %0.7f (num runs=%i)' % (f.__name__, tuple(a), result[1], result[0])))
return f(*args, **kwds)
return wrapper
|
def timefun(f):
'\n A decorator function for calling Timer with autorange on the provided\n function.\n '
@wraps(f)
def wrapper(*args, **kwds):
try:
result = timeit.Timer(partial(f, *args, **kwds)).autorange()
except Exception:
result = [10, timeit.Timer(partial(f, *args, **kwds)).timeit(10)]
a = [a for a in args]
a += [('%s=%s' % (k, kwds[k])) for k in kwds]
print(('Timing %s%s: %0.7f (num runs=%i)' % (f.__name__, tuple(a), result[1], result[0])))
return f(*args, **kwds)
return wrapper<|docstring|>A decorator function for calling Timer with autorange on the provided
function.<|endoftext|>
|
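A usage sketch for the `timefun` decorator, assuming the `py_search` package from this repository is importable; the decorated function and the printed timing are illustrative only.

```python
# Assumes py_search is installed so py_search.utils.timefun resolves.
from py_search.utils import timefun

@timefun
def sum_squares(n):
    return sum(i * i for i in range(n))

# Prints something like: Timing sum_squares(10000,): 0.0004321 (num runs=1000)
sum_squares(10000)
```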
a82e5071d35a99eb48053f0f231b82673e39fd4ff3a66e86b143168dd9930d3d
|
def compile(self, target):
'Build PDFium with Skia.'
pdfium_dir = self.m.vars.checkout_root.join('pdfium')
with self.m.context(cwd=pdfium_dir):
depot_tools = self.m.vars.checkout_root.join('depot_tools')
self.m.git.checkout('https://chromium.googlesource.com/chromium/tools/depot_tools.git', dir_path=depot_tools, ref='EXAMPLE_KEY')
self.m.run(self.m.step, 'runhook', cmd=[depot_tools.join('gclient'), 'runhook', 'gn_linux64'])
self.m.run(self.m.step, 'sysroot', cmd=['python', 'build/linux/sysroot_scripts/install-sysroot.py', '--arch=amd64'])
gn_args = ['pdf_is_standalone=true', 'clang_use_chrome_plugins=false', 'is_component_build=false', 'is_debug=false']
if ('SkiaPaths' in self.m.vars.builder_name):
gn_args.append('pdf_use_skia_paths=true')
else:
gn_args.append('pdf_use_skia=true')
env = self.m.context.env
env['CHROMIUM_BUILDTOOLS_PATH'] = str(pdfium_dir.join('buildtools'))
with self.m.context(env=env):
self.m.run(self.m.step, 'gn_gen', cmd=['gn', 'gen', 'out/skia', ('--args=%s' % ' '.join(gn_args))])
self.m.run(self.m.step, 'build_pdfium', cmd=['ninja', '-C', 'out/skia', '-j100'])
|
Build PDFium with Skia.
|
infra/bots/recipe_modules/flavor/pdfium_flavor.py
|
compile
|
rust-canvas/skia
| 46 |
python
|
def compile(self, target):
pdfium_dir = self.m.vars.checkout_root.join('pdfium')
with self.m.context(cwd=pdfium_dir):
depot_tools = self.m.vars.checkout_root.join('depot_tools')
self.m.git.checkout('https://chromium.googlesource.com/chromium/tools/depot_tools.git', dir_path=depot_tools, ref='EXAMPLE_KEY')
self.m.run(self.m.step, 'runhook', cmd=[depot_tools.join('gclient'), 'runhook', 'gn_linux64'])
self.m.run(self.m.step, 'sysroot', cmd=['python', 'build/linux/sysroot_scripts/install-sysroot.py', '--arch=amd64'])
gn_args = ['pdf_is_standalone=true', 'clang_use_chrome_plugins=false', 'is_component_build=false', 'is_debug=false']
if ('SkiaPaths' in self.m.vars.builder_name):
gn_args.append('pdf_use_skia_paths=true')
else:
gn_args.append('pdf_use_skia=true')
env = self.m.context.env
env['CHROMIUM_BUILDTOOLS_PATH'] = str(pdfium_dir.join('buildtools'))
with self.m.context(env=env):
self.m.run(self.m.step, 'gn_gen', cmd=['gn', 'gen', 'out/skia', ('--args=%s' % ' '.join(gn_args))])
self.m.run(self.m.step, 'build_pdfium', cmd=['ninja', '-C', 'out/skia', '-j100'])
|
def compile(self, target):
pdfium_dir = self.m.vars.checkout_root.join('pdfium')
with self.m.context(cwd=pdfium_dir):
depot_tools = self.m.vars.checkout_root.join('depot_tools')
self.m.git.checkout('https://chromium.googlesource.com/chromium/tools/depot_tools.git', dir_path=depot_tools, ref='EXAMPLE_KEY')
self.m.run(self.m.step, 'runhook', cmd=[depot_tools.join('gclient'), 'runhook', 'gn_linux64'])
self.m.run(self.m.step, 'sysroot', cmd=['python', 'build/linux/sysroot_scripts/install-sysroot.py', '--arch=amd64'])
gn_args = ['pdf_is_standalone=true', 'clang_use_chrome_plugins=false', 'is_component_build=false', 'is_debug=false']
if ('SkiaPaths' in self.m.vars.builder_name):
gn_args.append('pdf_use_skia_paths=true')
else:
gn_args.append('pdf_use_skia=true')
env = self.m.context.env
env['CHROMIUM_BUILDTOOLS_PATH'] = str(pdfium_dir.join('buildtools'))
with self.m.context(env=env):
self.m.run(self.m.step, 'gn_gen', cmd=['gn', 'gen', 'out/skia', ('--args=%s' % ' '.join(gn_args))])
self.m.run(self.m.step, 'build_pdfium', cmd=['ninja', '-C', 'out/skia', '-j100'])<|docstring|>Build PDFium with Skia.<|endoftext|>
|
aa26aba3da01048e2589e1b65946f0844c5cbd675dfe09ec88d8a763d99032b7
|
def timer(func):
'Print the runtime of the decorated function'
@functools.wraps(func)
def wrapper_timer(*args, **kwargs):
start_time = time.perf_counter_ns()
value = func(*args, **kwargs)
end_time = time.perf_counter_ns()
run_time = ((end_time - start_time) / 1000000.0)  # convert ns to ms
print(f'[PERF] Finished {func.__name__!r} in {run_time:.4f} ms')
return value
return wrapper_timer
|
Print the runtime of the decorated function
|
samples/decorators.py
|
timer
|
shazz/DistributedOpenCL
| 1 |
python
|
def timer(func):
@functools.wraps(func)
def wrapper_timer(*args, **kwargs):
start_time = time.perf_counter_ns()
value = func(*args, **kwargs)
end_time = time.perf_counter_ns()
run_time = ((end_time - start_time) / 1000000.0)  # convert ns to ms
print(f'[PERF] Finished {func.__name__!r} in {run_time:.4f} ms')
return value
return wrapper_timer
|
def timer(func):
@functools.wraps(func)
def wrapper_timer(*args, **kwargs):
start_time = time.perf_counter_ns()
value = func(*args, **kwargs)
end_time = time.perf_counter_ns()
run_time = ((end_time - start_time) / 1000000.0)  # convert ns to ms
print(f'[PERF] Finished {func.__name__!r} in {run_time:.4f} ms')
return value
return wrapper_timer<|docstring|>Print the runtime of the decorated function<|endoftext|>
|
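A compacted, runnable variant of the `timer` decorator above (nanosecond clock, reported in milliseconds) with a toy decorated call; `time.perf_counter_ns` needs Python 3.7+, and the decorated function is invented for illustration.

```python
import functools, time

def timer(func):
    @functools.wraps(func)
    def wrapper_timer(*args, **kwargs):
        start = time.perf_counter_ns()
        value = func(*args, **kwargs)
        run_time_ms = (time.perf_counter_ns() - start) / 1e6   # ns -> ms
        print(f'[PERF] Finished {func.__name__!r} in {run_time_ms:.4f} ms')
        return value
    return wrapper_timer

@timer
def busy_sum(n):
    return sum(range(n))

busy_sum(1_000_000)
```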
e1c2c761d3b6411ba1ada974398b772ec1bd6d56c11ea542ccbd1681be8624b7
|
def fetch_images_tag(pixabay_search_keyword, num_images):
'\n Fetches images from Pixabay w.r.t a keyword.\n :param pixabay_search_keyword: Keyword to perform the search on Pixabay.\n :param num_images: Number of images to retrieve.\n :return: List of PIL images.\n :return: List of image URLs.\n '
query = ((((PIXABAY_API + '&q=') + pixabay_search_keyword.lower()) + '&image_type=photo&safesearch=true&per_page=') + str(num_images))
logging.info(f'Making request to Pixabay for {num_images} images to fetch with {pixabay_search_keyword} keyword.')
start_time = time.time()
response = requests.get(query)
end_time = (time.time() - start_time)
logging.info(f'Fetched search results in {end_time:.3f} seconds.')
output = response.json()
all_images = []
all_image_urls = []
start_time = time.time()
for each in output['hits']:
imageurl = each['webformatURL']
response = requests.get(imageurl)
image = Image.open(BytesIO(response.content)).convert('RGB')
all_images.append(image)
all_image_urls.append(imageurl)
end_time = (time.time() - start_time)
logging.info(f'Fetched individual results in {end_time:.3f} seconds.')
return (all_images, all_image_urls)
|
Fetches images from Pixabay w.r.t a keyword.
:param pixabay_search_keyword: Keyword to perform the search on Pixabay.
:param num_images: Number of images to retrieve.
:return: List of PIL images.
:return: List of image URLs.
|
server/pixabay_utils.py
|
fetch_images_tag
|
deep-diver/image_search_with_natural_language
| 28 |
python
|
def fetch_images_tag(pixabay_search_keyword, num_images):
'\n Fetches images from Pixabay w.r.t a keyword.\n :param pixabay_search_keyword: Keyword to perform the search on Pixabay.\n :param num_images: Number of images to retrieve.\n :return: List of PIL images.\n :return: List of image URLs.\n '
query = ((((PIXABAY_API + '&q=') + pixabay_search_keyword.lower()) + '&image_type=photo&safesearch=true&per_page=') + str(num_images))
logging.info(f'Making request to Pixabay for {num_images} images to fetch with {pixabay_search_keyword} keyword.')
start_time = time.time()
response = requests.get(query)
end_time = (time.time() - start_time)
logging.info(f'Fetched search results in {end_time:.3f} seconds.')
output = response.json()
all_images = []
all_image_urls = []
start_time = time.time()
for each in output['hits']:
imageurl = each['webformatURL']
response = requests.get(imageurl)
image = Image.open(BytesIO(response.content)).convert('RGB')
all_images.append(image)
all_image_urls.append(imageurl)
end_time = (time.time() - start_time)
logging.info(f'Fetched individual results in {end_time:.3f} seconds.')
return (all_images, all_image_urls)
|
def fetch_images_tag(pixabay_search_keyword, num_images):
'\n Fetches images from Pixabay w.r.t a keyword.\n :param pixabay_search_keyword: Keyword to perform the search on Pixabay.\n :param num_images: Number of images to retrieve.\n :return: List of PIL images.\n :return: List of image URLs.\n '
query = ((((PIXABAY_API + '&q=') + pixabay_search_keyword.lower()) + '&image_type=photo&safesearch=true&per_page=') + str(num_images))
logging.info(f'Making request to Pixabay for {num_images} images to fetch with {pixabay_search_keyword} keyword.')
start_time = time.time()
response = requests.get(query)
end_time = (time.time() - start_time)
logging.info(f'Fetched search results in {end_time:.3f} seconds.')
output = response.json()
all_images = []
all_image_urls = []
start_time = time.time()
for each in output['hits']:
imageurl = each['webformatURL']
response = requests.get(imageurl)
image = Image.open(BytesIO(response.content)).convert('RGB')
all_images.append(image)
all_image_urls.append(imageurl)
end_time = (time.time() - start_time)
logging.info(f'Fetched individual results in {end_time:.3f} seconds.')
return (all_images, all_image_urls)<|docstring|>Fetches images from Pixabay w.r.t a keyword.
:param pixabay_search_keyword: Keyword to perform the search on Pixabay.
:param num_images: Number of images to retrieve.
:return: List of PIL images.
:return: List of image URLs.<|endoftext|>
|
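A minimal sketch of the underlying Pixabay fetch loop with `requests` and Pillow; the endpoint constant and API key are placeholders (the repository's real `PIXABAY_API` value is not shown here), and a network connection plus a valid key are required for it to return anything.

```python
# Placeholder endpoint/key; mirrors the query string and "hits"/"webformatURL"
# fields used by fetch_images_tag above.
from io import BytesIO
import requests
from PIL import Image

PIXABAY_API = "https://pixabay.com/api/?key=YOUR_API_KEY"   # placeholder key
query = PIXABAY_API + "&q=sunset&image_type=photo&safesearch=true&per_page=5"

hits = requests.get(query).json().get("hits", [])
images = [Image.open(BytesIO(requests.get(h["webformatURL"]).content)).convert("RGB")
          for h in hits]
print(len(images), "images fetched")
```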
dd1d5787932292fc418f30fb0a8b76750357c956fb02d65c0c9a6a23caa55e50
|
def get_logger(name: str):
'\n Returns a `logging.Logger` for `name` that can handle multiprocessing.\n\n If a log should be called on all processes, pass `main_process_only=False`\n\n E.g.\n ```python\n logger.info("My log", main_process_only=False)\n logger.debug("My log", main_process_only=False)\n ```\n\n Args:\n name (`str`):\n The name for the logger, such as `__file__`\n '
logger = logging.getLogger(name)
return MultiProcessAdapter(logger, {})
|
Returns a `logging.Logger` for `name` that can handle multiprocessing.
If a log should be called on all processes, pass `main_process_only=False`
E.g.
```python
logger.info("My log", main_process_only=False)
logger.debug("My log", main_process_only=False)
```
Args:
name (`str`):
The name for the logger, such as `__file__`
|
src/accelerate/logging.py
|
get_logger
|
yuxinyuan/accelerate
| 0 |
python
|
def get_logger(name: str):
'\n Returns a `logging.Logger` for `name` that can handle multiprocessing.\n\n If a log should be called on all processes, pass `main_process_only=False`\n\n E.g.\n ```python\n logger.info("My log", main_process_only=False)\n logger.debug("My log", main_process_only=False)\n ```\n\n Args:\n name (`str`):\n The name for the logger, such as `__file__`\n '
logger = logging.getLogger(name)
return MultiProcessAdapter(logger, {})
|
def get_logger(name: str):
'\n Returns a `logging.Logger` for `name` that can handle multiprocessing.\n\n If a log should be called on all processes, pass `main_process_only=False`\n\n E.g.\n ```python\n logger.info("My log", main_process_only=False)\n logger.debug("My log", main_process_only=False)\n ```\n\n Args:\n name (`str`):\n The name for the logger, such as `__file__`\n '
logger = logging.getLogger(name)
return MultiProcessAdapter(logger, {})<|docstring|>Returns a `logging.Logger` for `name` that can handle multiprocessing.
If a log should be called on all processes, pass `main_process_only=False`
E.g.
```python
logger.info("My log", main_process_only=False)
logger.debug("My log", main_process_only=False)
```
Args:
name (`str`):
The name for the logger, such as `__file__`<|endoftext|>
|
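A usage sketch expanding on the docstring above, assuming the `accelerate` package is installed; instantiating `Accelerator()` first is the usual way to initialize the process state the adapter consults, and the messages are illustrative.

```python
import logging
from accelerate import Accelerator
from accelerate.logging import get_logger

logging.basicConfig(level=logging.INFO)
accelerator = Accelerator()          # sets up the process/device state used by the adapter
logger = get_logger(__name__)

logger.info("visible on the main process only")
logger.info("visible on every process", main_process_only=False)
```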
843d30e9202ceff18d5a1553bf22400a26e19a2d0fb3be292dc176662886241f
|
@staticmethod
def _should_log(main_process_only):
'Check if log should be performed'
return ((not main_process_only) or (main_process_only and (AcceleratorState().local_process_index == 0)))
|
Check if log should be performed
|
src/accelerate/logging.py
|
_should_log
|
yuxinyuan/accelerate
| 0 |
python
|
@staticmethod
def _should_log(main_process_only):
return ((not main_process_only) or (main_process_only and (AcceleratorState().local_process_index == 0)))
|
@staticmethod
def _should_log(main_process_only):
return ((not main_process_only) or (main_process_only and (AcceleratorState().local_process_index == 0)))<|docstring|>Check if log should be performed<|endoftext|>
|
1ab716131fcbea813e9405c6bc2380c331bc0f99bdb6a1ae28d8a0c1315ef229
|
def log(self, level, msg, *args, **kwargs):
'\n Delegates logger call after checking if we should log.\n\n Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes\n or only the main executed one. Default is `True` if not passed\n '
main_process_only = kwargs.pop('main_process_only', True)
if (self.isEnabledFor(level) and self._should_log(main_process_only)):
(msg, kwargs) = self.process(msg, kwargs)
self.logger.log(level, msg, *args, **kwargs)
|
Delegates logger call after checking if we should log.
Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes
or only the main executed one. Default is `True` if not passed
|
src/accelerate/logging.py
|
log
|
yuxinyuan/accelerate
| 0 |
python
|
def log(self, level, msg, *args, **kwargs):
'\n Delegates logger call after checking if we should log.\n\n Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes\n or only the main executed one. Default is `True` if not passed\n '
main_process_only = kwargs.pop('main_process_only', True)
if (self.isEnabledFor(level) and self._should_log(main_process_only)):
(msg, kwargs) = self.process(msg, kwargs)
self.logger.log(level, msg, *args, **kwargs)
|
def log(self, level, msg, *args, **kwargs):
'\n Delegates logger call after checking if we should log.\n\n Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes\n or only the main executed one. Default is `True` if not passed\n '
main_process_only = kwargs.pop('main_process_only', True)
if (self.isEnabledFor(level) and self._should_log(main_process_only)):
(msg, kwargs) = self.process(msg, kwargs)
self.logger.log(level, msg, *args, **kwargs)<|docstring|>Delegates logger call after checking if we should log.
Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes
or only the main executed one. Default is `True` if not passed<|endoftext|>
|
45f2b69e56974a39b6dff45de3e658b9a0d6f29f67119ef752a277929f3ce73a
|
def find(path, follow_symlinks=True):
'List files.\n\n Args:\n path (str): Path.\n follow_symlinks (bool): Follow symlink.\n\n Returns:\n list[str]: List of files.\n '
args = [config['binaries']['find']]
if follow_symlinks:
args += ['-L']
args += [path]
(out, _) = sp.Popen(args, stdout=sp.PIPE).communicate()
return filter(None, out.decode().split('\n'))
|
List files.
Args:
path (str): Path.
follow_symlinks (bool): Follow symlink.
Returns:
list[str]: List of files.
|
catalogue/bin.py
|
find
|
wesselb/catalogue
| 0 |
python
|
def find(path, follow_symlinks=True):
'List files.\n\n Args:\n path (str): Path.\n follow_symlinks (bool): Follow symlink.\n\n Returns:\n list[str]: List of files.\n '
args = [config['binaries']['find']]
if follow_symlinks:
args += ['-L']
args += [path]
(out, _) = sp.Popen(args, stdout=sp.PIPE).communicate()
return filter(None, out.decode().split('\n'))
|
def find(path, follow_symlinks=True):
'List files.\n\n Args:\n path (str): Path.\n follow_symlinks (bool): Follow symlink.\n\n Returns:\n list[str]: List of files.\n '
args = [config['binaries']['find']]
if follow_symlinks:
args += ['-L']
args += [path]
(out, _) = sp.Popen(args, stdout=sp.PIPE).communicate()
return filter(None, out.decode().split('\n'))<|docstring|>List files.
Args:
path (str): Path.
follow_symlinks (bool): Follow symlink.
Returns:
list[str]: List of files.<|endoftext|>
|
049130f5aa7a63e5047f336696f9bc118a56c772ca71ee8c2c6867986e2fe41c
|
def mdfind(path, query):
'Search for content.\n\n Args:\n path (str): Path to search on.\n query (str): Query to search for.\n\n Returns:\n list[str]: List of files that match the search query.\n '
args = [config['binaries']['mdfind'], '-onlyin', path, query]
(out, _) = sp.Popen(args, stdout=sp.PIPE).communicate()
results = list(filter(None, out.decode().split('\n')))
files = file_filter(find(path, follow_symlinks=True), None)
sym_paths = map(os.path.realpath, filter(os.path.islink, files))
return (results + [r for p in sym_paths for r in mdfind(p, query)])
|
Search for content.
Args:
path (str): Path to search on.
query (str): Query to search for.
Returns:
list[str]: List of files that match the search query.
|
catalogue/bin.py
|
mdfind
|
wesselb/catalogue
| 0 |
python
|
def mdfind(path, query):
'Search for content.\n\n Args:\n path (str): Path to search on.\n query (str): Query to search for.\n\n Returns:\n list[str]: List of files that match the search query.\n '
args = [config['binaries']['mdfind'], '-onlyin', path, query]
(out, _) = sp.Popen(args, stdout=sp.PIPE).communicate()
results = list(filter(None, out.decode().split('\n')))
files = file_filter(find(path, follow_symlinks=True), None)
sym_paths = map(os.path.realpath, filter(os.path.islink, files))
return (results + [r for p in sym_paths for r in mdfind(p, query)])
|
def mdfind(path, query):
'Search for content.\n\n Args:\n path (str): Path to search on.\n query (str): Query to search for.\n\n Returns:\n list[str]: List of files that match the search query.\n '
args = [config['binaries']['mdfind'], '-onlyin', path, query]
(out, _) = sp.Popen(args, stdout=sp.PIPE).communicate()
results = list(filter(None, out.decode().split('\n')))
files = file_filter(find(path, follow_symlinks=True), None)
sym_paths = map(os.path.realpath, filter(os.path.islink, files))
return (results + [r for p in sym_paths for r in mdfind(p, query)])<|docstring|>Search for content.
Args:
path (str): Path to search on.
query (str): Query to search for.
Returns:
list[str]: List of files that match the search query.<|endoftext|>
|
766f9b413b8412004d3f2b49414d59615d5c32e6e63df9f0e45990646378972c
|
def fzf(input, query=None):
'Fuzzy search.\n\n Args:\n input (str): Input to search through.\n query (str): Query.\n\n Returns:\n list[str]: Fuzzy matches.\n '
args = [config['binaries']['fzf']]
if (query is not None):
args += ['-f', query]
p = sp.Popen(args, stdin=sp.PIPE, stdout=sp.PIPE)
p.stdin.write(input.encode())
(out, _) = p.communicate()
return filter(None, out.decode().split('\n'))
|
Fuzzy search.
Args:
input (str): Input to search through.
query (str): Query.
Returns:
list[str]: Fuzzy matches.
|
catalogue/bin.py
|
fzf
|
wesselb/catalogue
| 0 |
python
|
def fzf(input, query=None):
'Fuzzy search.\n\n Args:\n input (str): Input to search through.\n query (str): Query.\n\n Returns:\n list[str]: Fuzzy matches.\n '
args = [config['binaries']['fzf']]
if (query is not None):
args += ['-f', query]
p = sp.Popen(args, stdin=sp.PIPE, stdout=sp.PIPE)
p.stdin.write(input.encode())
(out, _) = p.communicate()
return filter(None, out.decode().split('\n'))
|
def fzf(input, query=None):
'Fuzzy search.\n\n Args:\n input (str): Input to search through.\n query (str): Query.\n\n Returns:\n list[str]: Fuzzy matches.\n '
args = [config['binaries']['fzf']]
if (query is not None):
args += ['-f', query]
p = sp.Popen(args, stdin=sp.PIPE, stdout=sp.PIPE)
p.stdin.write(input.encode())
(out, _) = p.communicate()
return filter(None, out.decode().split('\n'))<|docstring|>Fuzzy search.
Args:
input (str): Input to search through.
query (str): Query.
Returns:
list[str]: Fuzzy matches.<|endoftext|>
|
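The helpers above compose naturally: find enumerates candidate paths and fzf narrows them with a fuzzy query. A minimal sketch, assuming catalogue/bin.py is importable as catalogue.bin and the configured fzf binary exists:

from catalogue.bin import find, fzf  # assumed import path for catalogue/bin.py

def fuzzy_files(root, query):
    # Feed the newline-joined listing into fzf's filter mode (-f query).
    listing = '\n'.join(find(root, follow_symlinks=True))
    return list(fzf(listing, query=query))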
3988ae9cd64a446efae1ec39cbab1605b749b8e9fadaa6800b56187ad245d1eb
|
def pbcopy(x):
'Copy text to clipboard.\n\n Args:\n x (str): Text to copy.\n '
sp.Popen([config['binaries']['pbcopy']], stdout=sp.PIPE, stdin=sp.PIPE).communicate(input=x.encode())
|
Copy text to clipboard.
Args:
x (str): Text to copy.
|
catalogue/bin.py
|
pbcopy
|
wesselb/catalogue
| 0 |
python
|
def pbcopy(x):
'Copy text to clipboard.\n\n Args:\n x (str): Text to copy.\n '
sp.Popen([config['binaries']['pbcopy']], stdout=sp.PIPE, stdin=sp.PIPE).communicate(input=x.encode())
|
def pbcopy(x):
'Copy text to clipboard.\n\n Args:\n x (str): Text to copy.\n '
sp.Popen([config['binaries']['pbcopy']], stdout=sp.PIPE, stdin=sp.PIPE).communicate(input=x.encode())<|docstring|>Copy text to clipboard.
Args:
x (str): Text to copy.<|endoftext|>
|
977418ce7bca2bafd017c0bf9eec4d6a4b7da10d71b335f248c19d8871d1680b
|
def pbpaste():
'Paste text from clipboard.\n\n Returns:\n str:Text from clipboard.\n '
(out, _) = sp.Popen([config['binaries']['pbpaste']], stdout=sp.PIPE).communicate()
return out.decode()
|
Paste text from clipboard.
Returns:
str:Text from clipboard.
|
catalogue/bin.py
|
pbpaste
|
wesselb/catalogue
| 0 |
python
|
def pbpaste():
'Paste text from clipboard.\n\n Returns:\n str:Text from clipboard.\n '
(out, _) = sp.Popen([config['binaries']['pbpaste']], stdout=sp.PIPE).communicate()
return out.decode()
|
def pbpaste():
'Paste text from clipboard.\n\n Returns:\n str:Text from clipboard.\n '
(out, _) = sp.Popen([config['binaries']['pbpaste']], stdout=sp.PIPE).communicate()
return out.decode()<|docstring|>Paste text from clipboard.
Returns:
str:Text from clipboard.<|endoftext|>
|
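A round-trip sketch for the two clipboard wrappers above (assumption: running on macOS with pbcopy and pbpaste available under config['binaries']):

pbcopy('hello from catalogue')                            # copy a sample string
assert pbpaste().rstrip('\n') == 'hello from catalogue'   # read it back, tolerating a trailing newline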
24595c921334cb760d471bdd25daa6f1983e0977c5122f8deb37698c836340eb
|
def subl(path):
'Open Sublime Text.\n\n Args:\n path (str): File to open.\n '
sp.Popen([config['binaries']['subl'], path]).wait()
|
Open Sublime Text.
Args:
path (str): File to open.
|
catalogue/bin.py
|
subl
|
wesselb/catalogue
| 0 |
python
|
def subl(path):
'Open Sublime Text.\n\n Args:\n path (str): File to open.\n '
sp.Popen([config['binaries']['subl'], path]).wait()
|
def subl(path):
'Open Sublime Text.\n\n Args:\n path (str): File to open.\n '
sp.Popen([config['binaries']['subl'], path]).wait()<|docstring|>Open Sublime Text.
Args:
path (str): File to open.<|endoftext|>
|
7bf2f8895d20954fcef2255882260983ef1b292c2cc8d560567405144aa94ab2
|
def trash(path):
'Move a file to trash.\n\n Args:\n path (str): Path.\n '
sp.Popen([config['binaries']['trash'], path]).wait()
|
Move a file to trash.
Args:
path (str): Path.
|
catalogue/bin.py
|
trash
|
wesselb/catalogue
| 0 |
python
|
def trash(path):
'Move a file to trash.\n\n Args:\n path (str): Path.\n '
sp.Popen([config['binaries']['trash'], path]).wait()
|
def trash(path):
'Move a file to trash.\n\n Args:\n path (str): Path.\n '
sp.Popen([config['binaries']['trash'], path]).wait()<|docstring|>Move a file to trash.
Args:
path (str): Path.<|endoftext|>
|
6c46265ca8a3ec0430b4de4ff738004120d4bdb1a4a6d917458abe221beca03d
|
def remove_emoji(s):
'去除表情'
emoji_pattern = re.compile('[𐀀-\U0010ffff]', flags=re.UNICODE)
return emoji_pattern.sub('', str(s))
|
Remove emoji
|
daemon/ck101_daemon.py
|
remove_emoji
|
Odland/crawler_selenium
| 0 |
python
|
def remove_emoji(s):
emoji_pattern = re.compile('[𐀀-\U0010ffff]', flags=re.UNICODE)
    return emoji_pattern.sub('', str(s))
|
def remove_emoji(s):
emoji_pattern = re.compile('[𐀀-\U0010ffff]', flags=re.UNICODE)
    return emoji_pattern.sub('', str(s))<|docstring|>去除表情<|endoftext|>
|
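The character class above appears to cover U+10000 through U+10FFFF, i.e. every code point outside the Basic Multilingual Plane, which is where most emoji live. A small check with an explicitly escaped (and presumably equivalent) spelling of the same range:

import re

pattern = re.compile('[\U00010000-\U0010ffff]', flags=re.UNICODE)  # assumed equivalent to the pattern above
assert pattern.sub('', 'ok 😀') == 'ok '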
1cde3ea242ced4c77d6bb0c782ec192435d948974d8a625f0b15c60f36553d41
|
def remove_blank(self):
'去除空白字符'
blank_pattern = re.compile('\\s')
return blank_pattern.sub(' ', self.s)
|
Remove whitespace characters
|
daemon/ck101_daemon.py
|
remove_blank
|
Odland/crawler_selenium
| 0 |
python
|
def remove_blank(self):
blank_pattern = re.compile('\\s')
return blank_pattern.sub(' ', self.s)
|
def remove_blank(self):
blank_pattern = re.compile('\\s')
return blank_pattern.sub(' ', self.s)<|docstring|>去除空白字符<|endoftext|>
|
7a729a671e5f3525dfbad49931168fd73c44c9a1ea7cb080cf5846b455b2c469
|
def rehttps(sb):
'匹配链接'
hs = re.compile('https://ck101.com/forum.php\\?mod=forumdisplay&fid=[0-9]+$')
return hs.search(sb)
|
Match a link
|
daemon/ck101_daemon.py
|
rehttps
|
Odland/crawler_selenium
| 0 |
python
|
def rehttps(sb):
hs = re.compile('https://ck101.com/forum.php\\?mod=forumdisplay&fid=[0-9]+$')
return hs.search(sb)
|
def rehttps(sb):
hs = re.compile('https://ck101.com/forum.php\\?mod=forumdisplay&fid=[0-9]+$')
return hs.search(sb)<|docstring|>匹配链接<|endoftext|>
|
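For illustration (the URLs below are invented, following the shape used elsewhere in this file): the pattern only accepts a forumdisplay URL whose query string ends at the fid digits, so a trailing page parameter makes the search fail.

assert rehttps('https://ck101.com/forum.php?mod=forumdisplay&fid=3551') is not None
assert rehttps('https://ck101.com/forum.php?mod=forumdisplay&fid=3551&page=3') is None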
a025a33117f9a8ebb2416932a9491bbc530ecc98ce86cec634d7165698d2068b
|
def clean_comma(s):
'剔除数字中的逗号'
p = re.compile(',')
return p.sub('', s)
|
Strip commas from a number
|
daemon/ck101_daemon.py
|
clean_comma
|
Odland/crawler_selenium
| 0 |
python
|
def clean_comma(s):
p = re.compile(',')
    return p.sub('', s)
|
def clean_comma(s):
p = re.compile(',')
    return p.sub('', s)<|docstring|>剔除数字中的逗号<|endoftext|>
|
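A one-line check of the helper above (the figure is invented); this is how the crawler later turns a comma-formatted count such as the Facebook like number into an int:

assert clean_comma('1,234,567') == '1234567'
assert int(clean_comma('12,345')) == 12345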
ebbc82e1ac3820ff1775ac3f4c99cf0014f66fcebc31140471971338cee86788
|
def reply_data(divs_, browser):
'生成器函数,返回回复者的数据'
for i in divs_:
try:
reply_poster = i.find('a', attrs={'class': 'authorName', 'target': '_blank'}).get('title')
except AttributeError:
continue
try:
reply_datetime = i.find('span', attrs={'class': 'postDateLine'}).text
reply_datetime = datetime.datetime.strptime(reply_datetime[4:], '%Y-%m-%d %H:%M')
reply_content = i.find('td', attrs={'class': 't_f'}).get_text()
reply_content = remove_emoji(reply_content)
except AttributeError:
reply_content = None
(yield (reply_poster, reply_datetime, reply_content, browser.current_url))
|
Generator function that yields the repliers' data
|
daemon/ck101_daemon.py
|
reply_data
|
Odland/crawler_selenium
| 0 |
python
|
def reply_data(divs_, browser):
for i in divs_:
try:
reply_poster = i.find('a', attrs={'class': 'authorName', 'target': '_blank'}).get('title')
except AttributeError:
continue
try:
reply_datetime = i.find('span', attrs={'class': 'postDateLine'}).text
reply_datetime = datetime.datetime.strptime(reply_datetime[4:], '%Y-%m-%d %H:%M')
reply_content = i.find('td', attrs={'class': 't_f'}).get_text()
reply_content = remove_emoji(reply_content)
except AttributeError:
reply_content = None
(yield (reply_poster, reply_datetime, reply_content, browser.current_url))
|
def reply_data(divs_, browser):
for i in divs_:
try:
reply_poster = i.find('a', attrs={'class': 'authorName', 'target': '_blank'}).get('title')
except AttributeError:
continue
try:
reply_datetime = i.find('span', attrs={'class': 'postDateLine'}).text
reply_datetime = datetime.datetime.strptime(reply_datetime[4:], '%Y-%m-%d %H:%M')
reply_content = i.find('td', attrs={'class': 't_f'}).get_text()
reply_content = remove_emoji(reply_content)
except AttributeError:
reply_content = None
(yield (reply_poster, reply_datetime, reply_content, browser.current_url))<|docstring|>生成器函数,返回回复者的数据<|endoftext|>
|
c1d2e58d1191264371587da070ce5511ea5e3d204b5f255102963719b1987bb5
|
def ins_soc_reply(divs_, id_, browser):
'更新回复者的数据和评分'
post = Post.objects.get(id=id_)
num = 0
for s in reply_data(divs_, browser):
(reply_poster, reply_datetime, reply_content, reply_url) = s
reply = Reply(reply_poster=reply_poster, reply_content=reply_content, reply_datetime=reply_datetime, reply_url=reply_url, post=post)
reply.save()
if (Soc == True):
try:
srnp = int(i.find('ul', attrs={'class': 'rateInfo'}).li.p.text.split('評')[0])
trs = fetch_socre_ck(divs_[num], browser)
for tr in trs:
tds = tr.find_all('td')
srnumber = tds[0].text
srposter = tds[1].text
srcontent = tds[3].text
srnumber = int(srnumber.split(' ')[1])
srcontent = remove_emoji(srcontent)
socre_reply = Score_Reply(srposter=srposter, srnumber=srnumber, srcontent=srcontent, srnp=srnp, reply=reply)
socre_reply.save()
clk = WebDriverWait(browser, 2).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fctrl_viewratings"]/span/a')), message='超时')
browser.execute_script('arguments[0].scrollIntoView(false);', clk)
browser.execute_script('arguments[0].click()', clk)
except AttributeError:
srnp = 0
srnumber = 0
srcontent = None
srposter = None
socre_reply = Score_Reply(srposter=srposter, srnumber=srnumber, srcontent=srcontent, srnp=srnp, reply=reply)
socre_reply.save()
num += 1
|
Update the repliers' data and ratings
|
daemon/ck101_daemon.py
|
ins_soc_reply
|
Odland/crawler_selenium
| 0 |
python
|
def ins_soc_reply(divs_, id_, browser):
post = Post.objects.get(id=id_)
num = 0
for s in reply_data(divs_, browser):
(reply_poster, reply_datetime, reply_content, reply_url) = s
reply = Reply(reply_poster=reply_poster, reply_content=reply_content, reply_datetime=reply_datetime, reply_url=reply_url, post=post)
reply.save()
if (Soc == True):
try:
srnp = int(i.find('ul', attrs={'class': 'rateInfo'}).li.p.text.split('評')[0])
trs = fetch_socre_ck(divs_[num], browser)
for tr in trs:
tds = tr.find_all('td')
srnumber = tds[0].text
srposter = tds[1].text
srcontent = tds[3].text
srnumber = int(srnumber.split(' ')[1])
srcontent = remove_emoji(srcontent)
socre_reply = Score_Reply(srposter=srposter, srnumber=srnumber, srcontent=srcontent, srnp=srnp, reply=reply)
socre_reply.save()
clk = WebDriverWait(browser, 2).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fctrl_viewratings"]/span/a')), message='超时')
browser.execute_script('arguments[0].scrollIntoView(false);', clk)
browser.execute_script('arguments[0].click()', clk)
except AttributeError:
srnp = 0
srnumber = 0
srcontent = None
srposter = None
socre_reply = Score_Reply(srposter=srposter, srnumber=srnumber, srcontent=srcontent, srnp=srnp, reply=reply)
socre_reply.save()
num += 1
|
def ins_soc_reply(divs_, id_, browser):
post = Post.objects.get(id=id_)
num = 0
for s in reply_data(divs_, browser):
(reply_poster, reply_datetime, reply_content, reply_url) = s
reply = Reply(reply_poster=reply_poster, reply_content=reply_content, reply_datetime=reply_datetime, reply_url=reply_url, post=post)
reply.save()
if (Soc == True):
try:
srnp = int(i.find('ul', attrs={'class': 'rateInfo'}).li.p.text.split('評')[0])
trs = fetch_socre_ck(divs_[num], browser)
for tr in trs:
tds = tr.find_all('td')
srnumber = tds[0].text
srposter = tds[1].text
srcontent = tds[3].text
srnumber = int(srnumber.split(' ')[1])
srcontent = remove_emoji(srcontent)
socre_reply = Score_Reply(srposter=srposter, srnumber=srnumber, srcontent=srcontent, srnp=srnp, reply=reply)
socre_reply.save()
clk = WebDriverWait(browser, 2).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fctrl_viewratings"]/span/a')), message='超时')
browser.execute_script('arguments[0].scrollIntoView(false);', clk)
browser.execute_script('arguments[0].click()', clk)
except AttributeError:
srnp = 0
srnumber = 0
srcontent = None
srposter = None
socre_reply = Score_Reply(srposter=srposter, srnumber=srnumber, srcontent=srcontent, srnp=srnp, reply=reply)
socre_reply.save()
num += 1<|docstring|>更新回复者的数据和评分<|endoftext|>
|
72721666024058c6983fd3be10dd4432acf95312b95369721d921faf5e89fef1
|
def np_ins_reply(lastcurrurl, id_, browser):
'根据获得版块的链接和最后更新时间来插入回复者数据和评分者数据'
(soup, divs_, browser) = brow_page_source(lastcurrurl, browser)
replys = Reply.objects.filter(post_id=id_).values()
reply_name = replys[(len(replys) - 1)]['reply_poster']
for (i, j) in enumerate(divs_):
if (j.find('a', attrs={'class': 'authorName', 'target': '_blank'}).get('title') == reply_name):
divs_ = divs_[(i + 1):]
if (Soc == True):
socs = divs_[0:(i + 1)]
upsocre(socs, id_, browser)
break
ins_soc_reply(divs_, id_, browser)
num = 1
while True:
try:
if (num == NUM):
l = Last_Url.objects.get(post_id=id_)
l.lasturl = browser.current_url
l.save()
break
next_page = browser.find_element_by_css_selector('#postlist > div.threadBottom > div.pgs.mtm.mbm.cl > div > a.nxt')
browser.execute_script('arguments[0].scrollIntoView(false);', next_page)
browser.execute_script('arguments[0].click()', next_page)
soup = BeautifulSoup(browser.page_source, 'lxml')
divs_ = soup.find_all('div', attrs={'id': True, 'class': 'plhin'})
ins_soc_reply(divs_, id_)
except:
l = Last_Url.objects.get(post_id=id_)
l.lasturl = browser.current_url
l.save()
break
num += 1
|
Insert replier data and rater data based on the fetched board link and the last update time
|
daemon/ck101_daemon.py
|
np_ins_reply
|
Odland/crawler_selenium
| 0 |
python
|
def np_ins_reply(lastcurrurl, id_, browser):
(soup, divs_, browser) = brow_page_source(lastcurrurl, browser)
replys = Reply.objects.filter(post_id=id_).values()
reply_name = replys[(len(replys) - 1)]['reply_poster']
for (i, j) in enumerate(divs_):
if (j.find('a', attrs={'class': 'authorName', 'target': '_blank'}).get('title') == reply_name):
divs_ = divs_[(i + 1):]
if (Soc == True):
socs = divs_[0:(i + 1)]
upsocre(socs, id_, browser)
break
ins_soc_reply(divs_, id_, browser)
num = 1
while True:
try:
if (num == NUM):
l = Last_Url.objects.get(post_id=id_)
l.lasturl = browser.current_url
l.save()
break
next_page = browser.find_element_by_css_selector('#postlist > div.threadBottom > div.pgs.mtm.mbm.cl > div > a.nxt')
browser.execute_script('arguments[0].scrollIntoView(false);', next_page)
browser.execute_script('arguments[0].click()', next_page)
soup = BeautifulSoup(browser.page_source, 'lxml')
divs_ = soup.find_all('div', attrs={'id': True, 'class': 'plhin'})
ins_soc_reply(divs_, id_)
except:
l = Last_Url.objects.get(post_id=id_)
l.lasturl = browser.current_url
l.save()
break
num += 1
|
def np_ins_reply(lastcurrurl, id_, browser):
(soup, divs_, browser) = brow_page_source(lastcurrurl, browser)
replys = Reply.objects.filter(post_id=id_).values()
reply_name = replys[(len(replys) - 1)]['reply_poster']
for (i, j) in enumerate(divs_):
if (j.find('a', attrs={'class': 'authorName', 'target': '_blank'}).get('title') == reply_name):
divs_ = divs_[(i + 1):]
if (Soc == True):
socs = divs_[0:(i + 1)]
upsocre(socs, id_, browser)
break
ins_soc_reply(divs_, id_, browser)
num = 1
while True:
try:
if (num == NUM):
l = Last_Url.objects.get(post_id=id_)
l.lasturl = browser.current_url
l.save()
break
next_page = browser.find_element_by_css_selector('#postlist > div.threadBottom > div.pgs.mtm.mbm.cl > div > a.nxt')
browser.execute_script('arguments[0].scrollIntoView(false);', next_page)
browser.execute_script('arguments[0].click()', next_page)
soup = BeautifulSoup(browser.page_source, 'lxml')
divs_ = soup.find_all('div', attrs={'id': True, 'class': 'plhin'})
ins_soc_reply(divs_, id_)
except:
l = Last_Url.objects.get(post_id=id_)
l.lasturl = browser.current_url
l.save()
break
num += 1<|docstring|>根据获得版块的链接和最后更新时间来插入回复者数据和评分者数据<|endoftext|>
|
05d2a8b9ec5fb63cb8eb8f3d7155f5053310de00acc481426c09ab563c8092d5
|
def brow_page_source(url, browser):
'打开页面'
browser.maximize_window()
browser.get(url)
WebDriverWait(browser, 20).until(EC.presence_of_element_located((By.XPATH, '//*[@id="postlist"]')))
time.sleep(3)
soup = BeautifulSoup(browser.page_source, 'lxml')
divs_ = soup.find_all('div', attrs={'id': True, 'class': 'plhin'})
return (soup, divs_, browser)
|
Open the page
|
daemon/ck101_daemon.py
|
brow_page_source
|
Odland/crawler_selenium
| 0 |
python
|
def brow_page_source(url, browser):
browser.maximize_window()
browser.get(url)
WebDriverWait(browser, 20).until(EC.presence_of_element_located((By.XPATH, '//*[@id="postlist"]')))
time.sleep(3)
soup = BeautifulSoup(browser.page_source, 'lxml')
divs_ = soup.find_all('div', attrs={'id': True, 'class': 'plhin'})
return (soup, divs_, browser)
|
def brow_page_source(url, browser):
browser.maximize_window()
browser.get(url)
WebDriverWait(browser, 20).until(EC.presence_of_element_located((By.XPATH, '//*[@id="postlist"]')))
time.sleep(3)
soup = BeautifulSoup(browser.page_source, 'lxml')
divs_ = soup.find_all('div', attrs={'id': True, 'class': 'plhin'})
return (soup, divs_, browser)<|docstring|>打开页面<|endoftext|>
|
6068bf66061922b60935f3dc82b9edf02a060be83c50fb7409234492d3b68cf0
|
def fetch_socre_ck(i, browser):
'点击评分者页面'
xp = ('ratelog_' + i.attrs['id'].split('_')[1])
xp = (("//*[@id='" + xp) + "']/dd/p/a[1]")
ck = browser.find_element_by_xpath(xp)
browser.execute_script('arguments[0].scrollIntoView(false);', ck)
browser.execute_script('arguments[0].click()', ck)
tbody = WebDriverWait(browser, 2).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fwin_content_viewratings"]/div[1]/div/table/tbody')), message='超时')
soup_trs = BeautifulSoup(tbody.get_attribute('innerHTML'), 'lxml')
trs = soup_trs.find_all('tr')
return trs
|
Click through to the raters page
|
daemon/ck101_daemon.py
|
fetch_socre_ck
|
Odland/crawler_selenium
| 0 |
python
|
def fetch_socre_ck(i, browser):
xp = ('ratelog_' + i.attrs['id'].split('_')[1])
xp = (("//*[@id='" + xp) + "']/dd/p/a[1]")
ck = browser.find_element_by_xpath(xp)
browser.execute_script('arguments[0].scrollIntoView(false);', ck)
browser.execute_script('arguments[0].click()', ck)
tbody = WebDriverWait(browser, 2).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fwin_content_viewratings"]/div[1]/div/table/tbody')), message='超时')
soup_trs = BeautifulSoup(tbody.get_attribute('innerHTML'), 'lxml')
trs = soup_trs.find_all('tr')
return trs
|
def fetch_socre_ck(i, browser):
xp = ('ratelog_' + i.attrs['id'].split('_')[1])
xp = (("//*[@id='" + xp) + "']/dd/p/a[1]")
ck = browser.find_element_by_xpath(xp)
browser.execute_script('arguments[0].scrollIntoView(false);', ck)
browser.execute_script('arguments[0].click()', ck)
tbody = WebDriverWait(browser, 2).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fwin_content_viewratings"]/div[1]/div/table/tbody')), message='超时')
soup_trs = BeautifulSoup(tbody.get_attribute('innerHTML'), 'lxml')
trs = soup_trs.find_all('tr')
return trs<|docstring|>点击评分者页面<|endoftext|>
|
efa2f6c76cf81e52cdb1ee6412352cd9b0efd4a4b1ea58bfba53ce3b94aa6c64
|
def upsocre(divs_, id_, browser):
'更新评分者的数据'
for i in divs_:
try:
name = i.find('a', attrs={'class': 'authorName', 'target': '_blank'}).get('title')
except AttributeError:
continue
for n in Reply.objects.all().filter(post_id=id_):
if (n.reply_poster == name):
rid = n.id
break
if (int(i.find('a', attrs={'class': 'btncopy'}).em.text) != 1):
try:
srnp = int(i.find('ul', attrs={'class': 'rateInfo'}).li.p.text.split('評')[0])
rep = Score_Reply.objects.filter(reply_id=rid).values()[0]['srnp']
if (srnp > rep):
Score_Reply.objects.filter(reply_id=rid).update(srnp=srnp)
trs = fetch_socre_ck(i, browser)
num = 0
for tr in trs:
if (num <= (srnp - rep)):
tds = tr.find_all('td')
srnumber = tds[0].text
srposter = tds[1].text
srcontent = tds[3].text
srnumber = int(srnumber.split(' ')[1])
srcontent = remove_emoji(srcontent)
reply = Reply.objects.get(id=rid)
socre_reply = Score_Reply(srposter=srposter, srnumber=srnumber, srcontent=srcontent, srnp=srnp, reply=reply)
socre_reply.save()
num += 1
clk = WebDriverWait(browser, 2).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fctrl_viewratings"]/span/a')), message='超时')
browser.execute_script('arguments[0].scrollIntoView(false);', clk)
browser.execute_script('arguments[0].click()', clk)
except:
pass
else:
try:
spnp = int(i.find('ul', attrs={'class': 'rateInfo'}).li.p.text.split('評')[0])
pos = Score_Post.objects.filter(post_id=id_).values()[0]['spnp']
if (pos > spnp):
Score_Post.objects.filter(post_id=id_).update(spnp=spnp)
trs = fetch_socre_ck(i, browser)
num = 0
for tr in trs:
if (num <= (spnp - pos)):
tds = tr.find_all('td')
spnumber = tds[0].text
spposter = tds[1].text
spcontent = tds[3].text
spnumber = int(spnumber.split(' ')[1])
spcontent = remove_emoji(spcontent)
post = Post.objects.get(id=id_)
socre_post = Score_Post(spposter=spposter, spnumber=spnumber, spcontent=spcontent, spnp=spnp, post=post)
socre_post.save()
num += 1
clk = WebDriverWait(browser, 2).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fctrl_viewratings"]/span/a')), message='超时')
browser.execute_script('arguments[0].scrollIntoView(false);', clk)
browser.execute_script('arguments[0].click()', clk)
except Exception as e:
pass
return browser
|
Update the raters' data
|
daemon/ck101_daemon.py
|
upsocre
|
Odland/crawler_selenium
| 0 |
python
|
def upsocre(divs_, id_, browser):
for i in divs_:
try:
name = i.find('a', attrs={'class': 'authorName', 'target': '_blank'}).get('title')
except AttributeError:
continue
for n in Reply.objects.all().filter(post_id=id_):
if (n.reply_poster == name):
rid = n.id
break
if (int(i.find('a', attrs={'class': 'btncopy'}).em.text) != 1):
try:
srnp = int(i.find('ul', attrs={'class': 'rateInfo'}).li.p.text.split('評')[0])
rep = Score_Reply.objects.filter(reply_id=rid).values()[0]['srnp']
if (srnp > rep):
Score_Reply.objects.filter(reply_id=rid).update(srnp=srnp)
trs = fetch_socre_ck(i, browser)
num = 0
for tr in trs:
if (num <= (srnp - rep)):
tds = tr.find_all('td')
srnumber = tds[0].text
srposter = tds[1].text
srcontent = tds[3].text
srnumber = int(srnumber.split(' ')[1])
srcontent = remove_emoji(srcontent)
reply = Reply.objects.get(id=rid)
socre_reply = Score_Reply(srposter=srposter, srnumber=srnumber, srcontent=srcontent, srnp=srnp, reply=reply)
socre_reply.save()
num += 1
clk = WebDriverWait(browser, 2).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fctrl_viewratings"]/span/a')), message='超时')
browser.execute_script('arguments[0].scrollIntoView(false);', clk)
browser.execute_script('arguments[0].click()', clk)
except:
pass
else:
try:
spnp = int(i.find('ul', attrs={'class': 'rateInfo'}).li.p.text.split('評')[0])
pos = Score_Post.objects.filter(post_id=id_).values()[0]['spnp']
if (pos > spnp):
Score_Post.objects.filter(post_id=id_).update(spnp=spnp)
trs = fetch_socre_ck(i, browser)
num = 0
for tr in trs:
if (num <= (spnp - pos)):
tds = tr.find_all('td')
spnumber = tds[0].text
spposter = tds[1].text
spcontent = tds[3].text
spnumber = int(spnumber.split(' ')[1])
spcontent = remove_emoji(spcontent)
post = Post.objects.get(id=id_)
socre_post = Score_Post(spposter=spposter, spnumber=spnumber, spcontent=spcontent, spnp=spnp, post=post)
socre_post.save()
num += 1
clk = WebDriverWait(browser, 2).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fctrl_viewratings"]/span/a')), message='超时')
browser.execute_script('arguments[0].scrollIntoView(false);', clk)
browser.execute_script('arguments[0].click()', clk)
except Exception as e:
pass
return browser
|
def upsocre(divs_, id_, browser):
for i in divs_:
try:
name = i.find('a', attrs={'class': 'authorName', 'target': '_blank'}).get('title')
except AttributeError:
continue
for n in Reply.objects.all().filter(post_id=id_):
if (n.reply_poster == name):
rid = n.id
break
if (int(i.find('a', attrs={'class': 'btncopy'}).em.text) != 1):
try:
srnp = int(i.find('ul', attrs={'class': 'rateInfo'}).li.p.text.split('評')[0])
rep = Score_Reply.objects.filter(reply_id=rid).values()[0]['srnp']
if (srnp > rep):
Score_Reply.objects.filter(reply_id=rid).update(srnp=srnp)
trs = fetch_socre_ck(i, browser)
num = 0
for tr in trs:
if (num <= (srnp - rep)):
tds = tr.find_all('td')
srnumber = tds[0].text
srposter = tds[1].text
srcontent = tds[3].text
srnumber = int(srnumber.split(' ')[1])
srcontent = remove_emoji(srcontent)
reply = Reply.objects.get(id=rid)
socre_reply = Score_Reply(srposter=srposter, srnumber=srnumber, srcontent=srcontent, srnp=srnp, reply=reply)
socre_reply.save()
num += 1
clk = WebDriverWait(browser, 2).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fctrl_viewratings"]/span/a')), message='超时')
browser.execute_script('arguments[0].scrollIntoView(false);', clk)
browser.execute_script('arguments[0].click()', clk)
except:
pass
else:
try:
spnp = int(i.find('ul', attrs={'class': 'rateInfo'}).li.p.text.split('評')[0])
pos = Score_Post.objects.filter(post_id=id_).values()[0]['spnp']
if (pos > spnp):
Score_Post.objects.filter(post_id=id_).update(spnp=spnp)
trs = fetch_socre_ck(i, browser)
num = 0
for tr in trs:
if (num <= (spnp - pos)):
tds = tr.find_all('td')
spnumber = tds[0].text
spposter = tds[1].text
spcontent = tds[3].text
spnumber = int(spnumber.split(' ')[1])
spcontent = remove_emoji(spcontent)
post = Post.objects.get(id=id_)
socre_post = Score_Post(spposter=spposter, spnumber=spnumber, spcontent=spcontent, spnp=spnp, post=post)
socre_post.save()
num += 1
clk = WebDriverWait(browser, 2).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fctrl_viewratings"]/span/a')), message='超时')
browser.execute_script('arguments[0].scrollIntoView(false);', clk)
browser.execute_script('arguments[0].click()', clk)
except Exception as e:
pass
return browser<|docstring|>更新评分者的数据<|endoftext|>
|
35fe22b42a23134ff7c7ed7dff0c70cb096f4eed09eea8a31e6be5ed4a1f151e
|
def update_data(url, lasttime, browser):
'整个的更新程序入口'
print('正在更新数据...')
p = Post.objects.get(post_url=url)
p.lastupdate_datetime = lasttime
p.save()
id_ = Post.objects.filter(post_url=url).values()[0]['id']
if (Soc == False):
try:
lastcurrurl = Last_Url.objects.filter(post_id=id_).values()[0]['lasturl']
np_ins_reply(lastcurrurl, id_, browser)
except Exception as e:
print(e)
else:
(soup, divs_, browser) = brow_page_source(url, browser)
pdiv = divs_[0]
try:
replay_number = int(pdiv.find('span', attrs={'class': 'replayNum'}).text)
favorite_number = int(pdiv.find('a', attrs={'id': 'k_favorite', 'class': 'k_favorite'}).span.text)
thank_number = int(pdiv.find('a', attrs={'id': 'post_thanktmp', 'class': 'ths'}).span.text)
recommend_number = int(pdiv.find('a', attrs={'id': 'recommend_add', 'class': 'recommend_add'}).span.text)
nrecommend_number = int(pdiv.find('a', attrs={'id': 'recommend_subtract', 'class': 'recommend_subtract'}).span.text)
Post.objects.filter(id=id_).update(view_number=view_number, replay_number=replay_number, favorite_number=favorite_number, thank_number=thank_number, recommend_number=recommend_number, nrecommend_number=nrecommend_number)
except BaseException as e:
pass
try:
browser.switch_to.frame(WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="p_btn"]/span/div/span/iframe'))))
fb_number = WebDriverWait(browser, 18).until(EC.presence_of_element_located((By.XPATH, '//*[@id="u_0_3"]'))).text
fb_number = int(clean_comma(fb_number))
browser.switch_to.default_content()
Post.objects.filter(id=id_).update(fb_number=fb_number)
except BaseException as e:
print('请检查fb点赞数是否加载出来,并适当增加等待时间')
print(e)
if (lasttime != Post.objects.filter(post_url=url).values()[0]['lastupdate_datetime']):
lastcurrurl = Last_Url.objects.filter(post_id=id_).values()[0]['lasturl']
(soup, divs_, browser) = brow_page_source(url, browser)
while (lastcurrurl != browser.current_url):
browser = upsocre(divs_, id_, browser)
try:
next_page = browser.find_element_by_css_selector('#postlist > div.threadBottom > div.pgs.mtm.mbm.cl > div > a.nxt')
browser.execute_script('arguments[0].scrollIntoView(false);', next_page)
browser.execute_script('arguments[0].click()', next_page)
soup = BeautifulSoup(browser.page_source, 'lxml')
divs_ = soup.find_all('div', attrs={'id': True, 'class': 'plhin'})
except Exception as e:
pass
np_ins_reply(lastcurrurl, id_, browser)
else:
(soup, divs_, browser) = brow_page_source(url, browser)
upsocre(divs_, id_, browser)
while True:
try:
next_page = browser.find_element_by_css_selector('#postlist > div.threadBottom > div.pgs.mtm.mbm.cl > div > a.nxt')
browser.execute_script('arguments[0].scrollIntoView(false);', next_page)
browser.execute_script('arguments[0].click()', next_page)
soup = BeautifulSoup(browser.page_source, 'lxml')
divs_ = soup.find_all('div', attrs={'id': True, 'class': 'plhin'})
upsocre(divs_, id_, browser)
except:
break
return browser
|
Entry point of the whole update routine
|
daemon/ck101_daemon.py
|
update_data
|
Odland/crawler_selenium
| 0 |
python
|
def update_data(url, lasttime, browser):
print('正在更新数据...')
p = Post.objects.get(post_url=url)
p.lastupdate_datetime = lasttime
p.save()
id_ = Post.objects.filter(post_url=url).values()[0]['id']
if (Soc == False):
try:
lastcurrurl = Last_Url.objects.filter(post_id=id_).values()[0]['lasturl']
np_ins_reply(lastcurrurl, id_, browser)
except Exception as e:
print(e)
else:
(soup, divs_, browser) = brow_page_source(url, browser)
pdiv = divs_[0]
try:
replay_number = int(pdiv.find('span', attrs={'class': 'replayNum'}).text)
favorite_number = int(pdiv.find('a', attrs={'id': 'k_favorite', 'class': 'k_favorite'}).span.text)
thank_number = int(pdiv.find('a', attrs={'id': 'post_thanktmp', 'class': 'ths'}).span.text)
recommend_number = int(pdiv.find('a', attrs={'id': 'recommend_add', 'class': 'recommend_add'}).span.text)
nrecommend_number = int(pdiv.find('a', attrs={'id': 'recommend_subtract', 'class': 'recommend_subtract'}).span.text)
Post.objects.filter(id=id_).update(view_number=view_number, replay_number=replay_number, favorite_number=favorite_number, thank_number=thank_number, recommend_number=recommend_number, nrecommend_number=nrecommend_number)
except BaseException as e:
pass
try:
browser.switch_to.frame(WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="p_btn"]/span/div/span/iframe'))))
fb_number = WebDriverWait(browser, 18).until(EC.presence_of_element_located((By.XPATH, '//*[@id="u_0_3"]'))).text
fb_number = int(clean_comma(fb_number))
browser.switch_to.default_content()
Post.objects.filter(id=id_).update(fb_number=fb_number)
except BaseException as e:
print('请检查fb点赞数是否加载出来,并适当增加等待时间')
print(e)
if (lasttime != Post.objects.filter(post_url=url).values()[0]['lastupdate_datetime']):
lastcurrurl = Last_Url.objects.filter(post_id=id_).values()[0]['lasturl']
(soup, divs_, browser) = brow_page_source(url, browser)
while (lastcurrurl != browser.current_url):
browser = upsocre(divs_, id_, browser)
try:
next_page = browser.find_element_by_css_selector('#postlist > div.threadBottom > div.pgs.mtm.mbm.cl > div > a.nxt')
browser.execute_script('arguments[0].scrollIntoView(false);', next_page)
browser.execute_script('arguments[0].click()', next_page)
soup = BeautifulSoup(browser.page_source, 'lxml')
divs_ = soup.find_all('div', attrs={'id': True, 'class': 'plhin'})
except Exception as e:
pass
np_ins_reply(lastcurrurl, id_, browser)
else:
(soup, divs_, browser) = brow_page_source(url, browser)
upsocre(divs_, id_, browser)
while True:
try:
next_page = browser.find_element_by_css_selector('#postlist > div.threadBottom > div.pgs.mtm.mbm.cl > div > a.nxt')
browser.execute_script('arguments[0].scrollIntoView(false);', next_page)
browser.execute_script('arguments[0].click()', next_page)
soup = BeautifulSoup(browser.page_source, 'lxml')
divs_ = soup.find_all('div', attrs={'id': True, 'class': 'plhin'})
upsocre(divs_, id_, browser)
except:
break
return browser
|
def update_data(url, lasttime, browser):
print('正在更新数据...')
p = Post.objects.get(post_url=url)
p.lastupdate_datetime = lasttime
p.save()
id_ = Post.objects.filter(post_url=url).values()[0]['id']
if (Soc == False):
try:
lastcurrurl = Last_Url.objects.filter(post_id=id_).values()[0]['lasturl']
np_ins_reply(lastcurrurl, id_, browser)
except Exception as e:
print(e)
else:
(soup, divs_, browser) = brow_page_source(url, browser)
pdiv = divs_[0]
try:
replay_number = int(pdiv.find('span', attrs={'class': 'replayNum'}).text)
favorite_number = int(pdiv.find('a', attrs={'id': 'k_favorite', 'class': 'k_favorite'}).span.text)
thank_number = int(pdiv.find('a', attrs={'id': 'post_thanktmp', 'class': 'ths'}).span.text)
recommend_number = int(pdiv.find('a', attrs={'id': 'recommend_add', 'class': 'recommend_add'}).span.text)
nrecommend_number = int(pdiv.find('a', attrs={'id': 'recommend_subtract', 'class': 'recommend_subtract'}).span.text)
Post.objects.filter(id=id_).update(view_number=view_number, replay_number=replay_number, favorite_number=favorite_number, thank_number=thank_number, recommend_number=recommend_number, nrecommend_number=nrecommend_number)
except BaseException as e:
pass
try:
browser.switch_to.frame(WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="p_btn"]/span/div/span/iframe'))))
fb_number = WebDriverWait(browser, 18).until(EC.presence_of_element_located((By.XPATH, '//*[@id="u_0_3"]'))).text
fb_number = int(clean_comma(fb_number))
browser.switch_to.default_content()
Post.objects.filter(id=id_).update(fb_number=fb_number)
except BaseException as e:
print('请检查fb点赞数是否加载出来,并适当增加等待时间')
print(e)
if (lasttime != Post.objects.filter(post_url=url).values()[0]['lastupdate_datetime']):
lastcurrurl = Last_Url.objects.filter(post_id=id_).values()[0]['lasturl']
(soup, divs_, browser) = brow_page_source(url, browser)
while (lastcurrurl != browser.current_url):
browser = upsocre(divs_, id_, browser)
try:
next_page = browser.find_element_by_css_selector('#postlist > div.threadBottom > div.pgs.mtm.mbm.cl > div > a.nxt')
browser.execute_script('arguments[0].scrollIntoView(false);', next_page)
browser.execute_script('arguments[0].click()', next_page)
soup = BeautifulSoup(browser.page_source, 'lxml')
divs_ = soup.find_all('div', attrs={'id': True, 'class': 'plhin'})
except Exception as e:
pass
np_ins_reply(lastcurrurl, id_, browser)
else:
(soup, divs_, browser) = brow_page_source(url, browser)
upsocre(divs_, id_, browser)
while True:
try:
next_page = browser.find_element_by_css_selector('#postlist > div.threadBottom > div.pgs.mtm.mbm.cl > div > a.nxt')
browser.execute_script('arguments[0].scrollIntoView(false);', next_page)
browser.execute_script('arguments[0].click()', next_page)
soup = BeautifulSoup(browser.page_source, 'lxml')
divs_ = soup.find_all('div', attrs={'id': True, 'class': 'plhin'})
upsocre(divs_, id_, browser)
except:
break
return browser<|docstring|>整个的更新程序入口<|endoftext|>
|
979bea93561e97dbd4839cdb82b9a18298885fb0f0823ef6bf4c79e05fc05b79
|
def post_div(pdiv, post_url, url, last_time, browser):
'获取楼主发帖的数据'
browser.execute_script('window.scrollTo(0,document.body.scrollHeight)')
try:
category = pdiv.select_one('article hgroup > h2 > a').get('title')
title = pdiv.find('h1', attrs={'id': 'thread_subject', 'itemprop': 'headline'}).text
try:
view_number = int(pdiv.find('span', attrs={'class': 'viewNum'}).text)
except Exception as e:
view_number = 0
try:
replay_number = int(pdiv.find('span', attrs={'class': 'replayNum'}).text)
except Exception as e:
replay_number = 0
try:
poster_name = pdiv.find('a', attrs={'class': 'authorName', 'target': '_blank'}).get('title')
except Exception as e:
poster_name = '秘密客'
post_datetime = pdiv.find('span', attrs={'class': 'postDateLine'}).text
post_datetime = datetime.datetime.strptime(post_datetime[4:], '%Y-%m-%d %H:%M')
post_content = pdiv.find('td', attrs={'class': 't_f'})
[s.extract() for s in post_content('script')]
post_content = post_content.get_text()
post_content = remove_emoji(post_content)
except BaseException as e:
print(e)
try:
browser.switch_to.frame(WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="p_btn"]/span/div/span/iframe'))))
fb_number = WebDriverWait(browser, 18).until(EC.presence_of_element_located((By.XPATH, '//*[@id="u_0_3"]'))).text
fb_number = int(clean_comma(fb_number))
browser.switch_to.default_content()
except BaseException as e:
print(e)
favorite_number = int(pdiv.find('a', attrs={'id': 'k_favorite', 'class': 'k_favorite'}).span.text)
thank_number = int(pdiv.find('a', attrs={'id': 'post_thanktmp', 'class': 'ths'}).span.text)
recommend_number = int(pdiv.find('a', attrs={'id': 'recommend_add', 'class': 'recommend_add'}).span.text)
nrecommend_number = int(pdiv.find('a', attrs={'id': 'recommend_subtract', 'class': 'recommend_subtract'}).span.text)
post = Post(entrance_id=0, category=category, title=title, post_url=post_url, view_number=view_number, replay_number=replay_number, poster_name=poster_name, post_datetime=post_datetime, post_content=post_content, favorite_number=favorite_number, thank_number=thank_number, recommend_number=recommend_number, nrecommend_number=nrecommend_number, fb_number=fb_number, lastupdate_datetime=last_time)
post.save()
if (Soc == True):
try:
spnp = int(pdiv.find('ul', attrs={'class': 'rateInfo'}).li.p.text.split('評')[0])
xp = ('ratelog_' + pdiv.attrs['id'].split('_')[1])
xp = (("//*[@id='" + xp) + "']/dd/p/a[1]")
ck = browser.find_element_by_xpath(xp)
browser.execute_script('arguments[0].scrollIntoView(false);', ck)
browser.execute_script('arguments[0].click()', ck)
tbody = WebDriverWait(browser, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fwin_content_viewratings"]/div[1]/div/table/tbody')), message='超时')
soup_trs = BeautifulSoup(tbody.get_attribute('innerHTML'), 'lxml')
trs = soup_trs.find_all('tr')
for tr in trs:
tds = tr.find_all('td')
spnumber = tds[0].text
spposter = tds[1].text
spcontent = tds[3].text
spnumber = int(spnumber.split(' ')[1])
spcontent = remove_emoji(spcontent)
socre_post = Score_Post(spposter=spposter, spnumber=spnumber, spcontent=spcontent, spnp=spnp, post=post)
socre_post.save()
clk = WebDriverWait(browser, 2).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fctrl_viewratings"]/span/a')), message='超时')
browser.execute_script('arguments[0].scrollIntoView(false);', clk)
browser.execute_script('arguments[0].click()', clk)
except AttributeError:
spnp = 0
spnumber = 0
spcontent = None
spposter = None
socre_post = Score_Post(spposter=spposter, spnumber=spnumber, spcontent=spcontent, spnp=spnp, post=post)
socre_post.save()
return post
|
Fetch the original poster's post data
|
daemon/ck101_daemon.py
|
post_div
|
Odland/crawler_selenium
| 0 |
python
|
def post_div(pdiv, post_url, url, last_time, browser):
browser.execute_script('window.scrollTo(0,document.body.scrollHeight)')
try:
category = pdiv.select_one('article hgroup > h2 > a').get('title')
title = pdiv.find('h1', attrs={'id': 'thread_subject', 'itemprop': 'headline'}).text
try:
view_number = int(pdiv.find('span', attrs={'class': 'viewNum'}).text)
except Exception as e:
view_number = 0
try:
replay_number = int(pdiv.find('span', attrs={'class': 'replayNum'}).text)
except Exception as e:
replay_number = 0
try:
poster_name = pdiv.find('a', attrs={'class': 'authorName', 'target': '_blank'}).get('title')
except Exception as e:
poster_name = '秘密客'
post_datetime = pdiv.find('span', attrs={'class': 'postDateLine'}).text
post_datetime = datetime.datetime.strptime(post_datetime[4:], '%Y-%m-%d %H:%M')
post_content = pdiv.find('td', attrs={'class': 't_f'})
[s.extract() for s in post_content('script')]
post_content = post_content.get_text()
post_content = remove_emoji(post_content)
except BaseException as e:
print(e)
try:
browser.switch_to.frame(WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="p_btn"]/span/div/span/iframe'))))
fb_number = WebDriverWait(browser, 18).until(EC.presence_of_element_located((By.XPATH, '//*[@id="u_0_3"]'))).text
fb_number = int(clean_comma(fb_number))
browser.switch_to.default_content()
except BaseException as e:
print(e)
favorite_number = int(pdiv.find('a', attrs={'id': 'k_favorite', 'class': 'k_favorite'}).span.text)
thank_number = int(pdiv.find('a', attrs={'id': 'post_thanktmp', 'class': 'ths'}).span.text)
recommend_number = int(pdiv.find('a', attrs={'id': 'recommend_add', 'class': 'recommend_add'}).span.text)
nrecommend_number = int(pdiv.find('a', attrs={'id': 'recommend_subtract', 'class': 'recommend_subtract'}).span.text)
post = Post(entrance_id=0, category=category, title=title, post_url=post_url, view_number=view_number, replay_number=replay_number, poster_name=poster_name, post_datetime=post_datetime, post_content=post_content, favorite_number=favorite_number, thank_number=thank_number, recommend_number=recommend_number, nrecommend_number=nrecommend_number, fb_number=fb_number, lastupdate_datetime=last_time)
post.save()
if (Soc == True):
try:
spnp = int(pdiv.find('ul', attrs={'class': 'rateInfo'}).li.p.text.split('評')[0])
xp = ('ratelog_' + pdiv.attrs['id'].split('_')[1])
xp = (("//*[@id='" + xp) + "']/dd/p/a[1]")
ck = browser.find_element_by_xpath(xp)
browser.execute_script('arguments[0].scrollIntoView(false);', ck)
browser.execute_script('arguments[0].click()', ck)
tbody = WebDriverWait(browser, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fwin_content_viewratings"]/div[1]/div/table/tbody')), message='超时')
soup_trs = BeautifulSoup(tbody.get_attribute('innerHTML'), 'lxml')
trs = soup_trs.find_all('tr')
for tr in trs:
tds = tr.find_all('td')
spnumber = tds[0].text
spposter = tds[1].text
spcontent = tds[3].text
spnumber = int(spnumber.split(' ')[1])
spcontent = remove_emoji(spcontent)
socre_post = Score_Post(spposter=spposter, spnumber=spnumber, spcontent=spcontent, spnp=spnp, post=post)
socre_post.save()
clk = WebDriverWait(browser, 2).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fctrl_viewratings"]/span/a')), message='超时')
browser.execute_script('arguments[0].scrollIntoView(false);', clk)
browser.execute_script('arguments[0].click()', clk)
except AttributeError:
spnp = 0
spnumber = 0
spcontent = None
spposter = None
socre_post = Score_Post(spposter=spposter, spnumber=spnumber, spcontent=spcontent, spnp=spnp, post=post)
socre_post.save()
return post
|
def post_div(pdiv, post_url, url, last_time, browser):
browser.execute_script('window.scrollTo(0,document.body.scrollHeight)')
try:
category = pdiv.select_one('article hgroup > h2 > a').get('title')
title = pdiv.find('h1', attrs={'id': 'thread_subject', 'itemprop': 'headline'}).text
try:
view_number = int(pdiv.find('span', attrs={'class': 'viewNum'}).text)
except Exception as e:
view_number = 0
try:
replay_number = int(pdiv.find('span', attrs={'class': 'replayNum'}).text)
except Exception as e:
replay_number = 0
try:
poster_name = pdiv.find('a', attrs={'class': 'authorName', 'target': '_blank'}).get('title')
except Exception as e:
poster_name = '秘密客'
post_datetime = pdiv.find('span', attrs={'class': 'postDateLine'}).text
post_datetime = datetime.datetime.strptime(post_datetime[4:], '%Y-%m-%d %H:%M')
post_content = pdiv.find('td', attrs={'class': 't_f'})
[s.extract() for s in post_content('script')]
post_content = post_content.get_text()
post_content = remove_emoji(post_content)
except BaseException as e:
print(e)
try:
browser.switch_to.frame(WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="p_btn"]/span/div/span/iframe'))))
fb_number = WebDriverWait(browser, 18).until(EC.presence_of_element_located((By.XPATH, '//*[@id="u_0_3"]'))).text
fb_number = int(clean_comma(fb_number))
browser.switch_to.default_content()
except BaseException as e:
print(e)
favorite_number = int(pdiv.find('a', attrs={'id': 'k_favorite', 'class': 'k_favorite'}).span.text)
thank_number = int(pdiv.find('a', attrs={'id': 'post_thanktmp', 'class': 'ths'}).span.text)
recommend_number = int(pdiv.find('a', attrs={'id': 'recommend_add', 'class': 'recommend_add'}).span.text)
nrecommend_number = int(pdiv.find('a', attrs={'id': 'recommend_subtract', 'class': 'recommend_subtract'}).span.text)
post = Post(entrance_id=0, category=category, title=title, post_url=post_url, view_number=view_number, replay_number=replay_number, poster_name=poster_name, post_datetime=post_datetime, post_content=post_content, favorite_number=favorite_number, thank_number=thank_number, recommend_number=recommend_number, nrecommend_number=nrecommend_number, fb_number=fb_number, lastupdate_datetime=last_time)
post.save()
if (Soc == True):
try:
spnp = int(pdiv.find('ul', attrs={'class': 'rateInfo'}).li.p.text.split('評')[0])
xp = ('ratelog_' + pdiv.attrs['id'].split('_')[1])
xp = (("//*[@id='" + xp) + "']/dd/p/a[1]")
ck = browser.find_element_by_xpath(xp)
browser.execute_script('arguments[0].scrollIntoView(false);', ck)
browser.execute_script('arguments[0].click()', ck)
tbody = WebDriverWait(browser, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fwin_content_viewratings"]/div[1]/div/table/tbody')), message='超时')
soup_trs = BeautifulSoup(tbody.get_attribute('innerHTML'), 'lxml')
trs = soup_trs.find_all('tr')
for tr in trs:
tds = tr.find_all('td')
spnumber = tds[0].text
spposter = tds[1].text
spcontent = tds[3].text
spnumber = int(spnumber.split(' ')[1])
spcontent = remove_emoji(spcontent)
socre_post = Score_Post(spposter=spposter, spnumber=spnumber, spcontent=spcontent, spnp=spnp, post=post)
socre_post.save()
clk = WebDriverWait(browser, 2).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fctrl_viewratings"]/span/a')), message='超时')
browser.execute_script('arguments[0].scrollIntoView(false);', clk)
browser.execute_script('arguments[0].click()', clk)
except AttributeError:
spnp = 0
spnumber = 0
spcontent = None
spposter = None
socre_post = Score_Post(spposter=spposter, spnumber=spnumber, spcontent=spcontent, spnp=spnp, post=post)
socre_post.save()
return post<|docstring|>获取楼主发帖的数据<|endoftext|>
|
a647d23060a0d9f20ffebf483773fb847565e049d887cf9e3a89303706ea31a8
|
def reply_divs(rdivs, post, browser):
'获取回复者的数据'
num = 0
for s in reply_data(rdivs, browser):
(reply_poster, reply_datetime, reply_content, reply_url) = s
reply = Reply(reply_poster=reply_poster, reply_content=reply_content, reply_datetime=reply_datetime, reply_url=reply_url, post=post)
reply.save()
if Soc:
try:
i = rdivs[num]
srnp = int(i.find('ul', attrs={'class': 'rateInfo'}).li.p.text.split('評')[0])
xp = ('ratelog_' + i.attrs['id'].split('_')[1])
xp = (("//*[@id='" + xp) + "']/dd/p/a[1]")
ck = browser.find_element_by_xpath(xp)
browser.execute_script('arguments[0].scrollIntoView(false);', ck)
browser.execute_script('arguments[0].click()', ck)
tbody = WebDriverWait(browser, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fwin_content_viewratings"]/div[1]/div/table/tbody')), message='超时')
soup_trs = BeautifulSoup(tbody.get_attribute('innerHTML'), 'lxml')
trs = soup_trs.find_all('tr')
for tr in trs:
tds = tr.find_all('td')
srnumber = tds[0].text
srposter = tds[1].text
srcontent = tds[3].text
srnumber = int(srnumber.split(' ')[1])
srcontent = remove_emoji(srcontent)
socre_reply = Score_Reply(srposter=srposter, srnumber=srnumber, srcontent=srcontent, srnp=srnp, reply=reply)
socre_reply.save()
clk = WebDriverWait(browser, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fctrl_viewratings"]/span/a')), message='超时')
browser.execute_script('arguments[0].scrollIntoView(false);', clk)
browser.execute_script('arguments[0].click()', clk)
except AttributeError:
srnp = 0
srnumber = 0
srcontent = None
srposter = None
socre_reply = Score_Reply(srposter=srposter, srnumber=srnumber, srcontent=srcontent, srnp=srnp, reply=reply)
socre_reply.save()
num += 1
|
Fetch the repliers' data
|
daemon/ck101_daemon.py
|
reply_divs
|
Odland/crawler_selenium
| 0 |
python
|
def reply_divs(rdivs, post, browser):
num = 0
for s in reply_data(rdivs, browser):
(reply_poster, reply_datetime, reply_content, reply_url) = s
reply = Reply(reply_poster=reply_poster, reply_content=reply_content, reply_datetime=reply_datetime, reply_url=reply_url, post=post)
reply.save()
if Soc:
try:
i = rdivs[num]
srnp = int(i.find('ul', attrs={'class': 'rateInfo'}).li.p.text.split('評')[0])
xp = ('ratelog_' + i.attrs['id'].split('_')[1])
xp = (("//*[@id='" + xp) + "']/dd/p/a[1]")
ck = browser.find_element_by_xpath(xp)
browser.execute_script('arguments[0].scrollIntoView(false);', ck)
browser.execute_script('arguments[0].click()', ck)
tbody = WebDriverWait(browser, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fwin_content_viewratings"]/div[1]/div/table/tbody')), message='超时')
soup_trs = BeautifulSoup(tbody.get_attribute('innerHTML'), 'lxml')
trs = soup_trs.find_all('tr')
for tr in trs:
tds = tr.find_all('td')
srnumber = tds[0].text
srposter = tds[1].text
srcontent = tds[3].text
srnumber = int(srnumber.split(' ')[1])
srcontent = remove_emoji(srcontent)
socre_reply = Score_Reply(srposter=srposter, srnumber=srnumber, srcontent=srcontent, srnp=srnp, reply=reply)
socre_reply.save()
clk = WebDriverWait(browser, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fctrl_viewratings"]/span/a')), message='超时')
browser.execute_script('arguments[0].scrollIntoView(false);', clk)
browser.execute_script('arguments[0].click()', clk)
except AttributeError:
srnp = 0
srnumber = 0
srcontent = None
srposter = None
socre_reply = Score_Reply(srposter=srposter, srnumber=srnumber, srcontent=srcontent, srnp=srnp, reply=reply)
socre_reply.save()
num += 1
|
def reply_divs(rdivs, post, browser):
num = 0
for s in reply_data(rdivs, browser):
(reply_poster, reply_datetime, reply_content, reply_url) = s
reply = Reply(reply_poster=reply_poster, reply_content=reply_content, reply_datetime=reply_datetime, reply_url=reply_url, post=post)
reply.save()
if Soc:
try:
i = rdivs[num]
srnp = int(i.find('ul', attrs={'class': 'rateInfo'}).li.p.text.split('評')[0])
xp = ('ratelog_' + i.attrs['id'].split('_')[1])
xp = (("//*[@id='" + xp) + "']/dd/p/a[1]")
ck = browser.find_element_by_xpath(xp)
browser.execute_script('arguments[0].scrollIntoView(false);', ck)
browser.execute_script('arguments[0].click()', ck)
tbody = WebDriverWait(browser, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fwin_content_viewratings"]/div[1]/div/table/tbody')), message='超时')
soup_trs = BeautifulSoup(tbody.get_attribute('innerHTML'), 'lxml')
trs = soup_trs.find_all('tr')
for tr in trs:
tds = tr.find_all('td')
srnumber = tds[0].text
srposter = tds[1].text
srcontent = tds[3].text
srnumber = int(srnumber.split(' ')[1])
srcontent = remove_emoji(srcontent)
socre_reply = Score_Reply(srposter=srposter, srnumber=srnumber, srcontent=srcontent, srnp=srnp, reply=reply)
socre_reply.save()
clk = WebDriverWait(browser, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id="fctrl_viewratings"]/span/a')), message='超时')
browser.execute_script('arguments[0].scrollIntoView(false);', clk)
browser.execute_script('arguments[0].click()', clk)
except AttributeError:
srnp = 0
srnumber = 0
srcontent = None
srposter = None
socre_reply = Score_Reply(srposter=srposter, srnumber=srnumber, srcontent=srcontent, srnp=srnp, reply=reply)
socre_reply.save()
num += 1<|docstring|>获取回复者的数据<|endoftext|>
|
206ad6c8832bc1e758675b9e346bbd74c2f165ae925254df8151cd47db483597
|
def divs(post_url, last_time, url, browser):
'爬取数据的程序入口'
browser.get(post_url)
WebDriverWait(browser, 20).until(EC.presence_of_element_located((By.XPATH, '//*[@id="postlist"]')))
time.sleep(1)
soup = BeautifulSoup(browser.page_source, 'lxml')
divs_ = soup.find_all('div', attrs={'id': True, 'class': 'plhin'})
pdiv = divs_[0]
post = post_div(pdiv, post_url, url, last_time, browser)
rdivs = divs_[1:]
if (len(rdivs) > 0):
reply_divs(rdivs, post, browser)
while True:
try:
next_page = browser.find_element_by_css_selector('#postlist > div.threadBottom > div.pgs.mtm.mbm.cl > div > a.nxt')
browser.execute_script('arguments[0].scrollIntoView(false);', next_page)
browser.execute_script('arguments[0].click()', next_page)
time.sleep(1)
soup = BeautifulSoup(browser.page_source, 'lxml')
npages = soup.find_all('div', attrs={'id': True, 'class': 'plhin'})
reply_divs(npages, post, browser)
currpagenum = soup.select_one('#pgt > div.pgt > div > strong')
if currpagenum:
hnum = int(soup.select_one('#pgt > div.pgt > div > strong').text)
if (hnum == NUM):
last_url = Last_Url(lasturl=browser.current_url, post=post)
last_url.save()
break
except:
last_url = Last_Url(lasturl=browser.current_url, post=post)
last_url.save()
break
else:
last_url = Last_Url(lasturl=browser.current_url, post=post)
last_url.save()
return browser
|
Entry point of the data-crawling routine
|
daemon/ck101_daemon.py
|
divs
|
Odland/crawler_selenium
| 0 |
python
|
def divs(post_url, last_time, url, browser):
browser.get(post_url)
WebDriverWait(browser, 20).until(EC.presence_of_element_located((By.XPATH, '//*[@id="postlist"]')))
time.sleep(1)
soup = BeautifulSoup(browser.page_source, 'lxml')
divs_ = soup.find_all('div', attrs={'id': True, 'class': 'plhin'})
pdiv = divs_[0]
post = post_div(pdiv, post_url, url, last_time, browser)
rdivs = divs_[1:]
if (len(rdivs) > 0):
reply_divs(rdivs, post, browser)
while True:
try:
next_page = browser.find_element_by_css_selector('#postlist > div.threadBottom > div.pgs.mtm.mbm.cl > div > a.nxt')
browser.execute_script('arguments[0].scrollIntoView(false);', next_page)
browser.execute_script('arguments[0].click()', next_page)
time.sleep(1)
soup = BeautifulSoup(browser.page_source, 'lxml')
npages = soup.find_all('div', attrs={'id': True, 'class': 'plhin'})
reply_divs(npages, post, browser)
currpagenum = soup.select_one('#pgt > div.pgt > div > strong')
if currpagenum:
hnum = int(soup.select_one('#pgt > div.pgt > div > strong').text)
if (hnum == NUM):
last_url = Last_Url(lasturl=browser.current_url, post=post)
last_url.save()
break
except:
last_url = Last_Url(lasturl=browser.current_url, post=post)
last_url.save()
break
else:
last_url = Last_Url(lasturl=browser.current_url, post=post)
last_url.save()
return browser
|
def divs(post_url, last_time, url, browser):
browser.get(post_url)
WebDriverWait(browser, 20).until(EC.presence_of_element_located((By.XPATH, '//*[@id="postlist"]')))
time.sleep(1)
soup = BeautifulSoup(browser.page_source, 'lxml')
divs_ = soup.find_all('div', attrs={'id': True, 'class': 'plhin'})
pdiv = divs_[0]
post = post_div(pdiv, post_url, url, last_time, browser)
rdivs = divs_[1:]
if (len(rdivs) > 0):
reply_divs(rdivs, post, browser)
while True:
try:
next_page = browser.find_element_by_css_selector('#postlist > div.threadBottom > div.pgs.mtm.mbm.cl > div > a.nxt')
browser.execute_script('arguments[0].scrollIntoView(false);', next_page)
browser.execute_script('arguments[0].click()', next_page)
time.sleep(1)
soup = BeautifulSoup(browser.page_source, 'lxml')
npages = soup.find_all('div', attrs={'id': True, 'class': 'plhin'})
reply_divs(npages, post, browser)
currpagenum = soup.select_one('#pgt > div.pgt > div > strong')
if currpagenum:
hnum = int(soup.select_one('#pgt > div.pgt > div > strong').text)
if (hnum == NUM):
last_url = Last_Url(lasturl=browser.current_url, post=post)
last_url.save()
break
except:
last_url = Last_Url(lasturl=browser.current_url, post=post)
last_url.save()
break
else:
last_url = Last_Url(lasturl=browser.current_url, post=post)
last_url.save()
return browser<|docstring|>爬取数据的程序入口<|endoftext|>
|
adffc59de25ebf91477b54b44502ca54729ef8160ee5962a53284f972b928139
|
def job(deep, genre='https://ck101.com/forum.php?mod=forumdisplay&fid=3551&page=3', browser=webdriver.Chrome(options=chrome_options)):
'整个的项目的外界入口'
browser.maximize_window()
try:
browser.get(genre)
p = WebDriverWait(browser, 3).until(EC.presence_of_element_located((By.XPATH, '//*[@id="ct"]/div/div[4]/div[2]/div/div[6]/span[3]/a')))
if (p.get_attribute('class') == 'chked'):
genre = p.get_attribute('href')
except:
pass
url = genre
browser.get(url)
elem = WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="threadlisttableid"]')))
if elem:
browser.execute_script('window.scrollTo(0,document.body.scrollHeight)')
print('程序开始运行...')
time.sleep(1)
soup_level1 = BeautifulSoup(browser.page_source, 'lxml')
pages = []
dbpages = set()
for i in Post.objects.all():
dbpages.add(i.post_url)
print('数据库中有{}个版块链接'.format(len(dbpages)))
tbs = soup_level1.find_all('tbody', attrs={'id': re.compile('^normalthread*'), 'class': 'threadrow'})
updatepag = []
for tb in tbs:
herf = tb.find('a', class_='s xst').get('href')
lastpost_time = tb.find('a', class_='lastpost_time').text
lastpost_time = datetime.datetime.strptime(lastpost_time, '%Y-%m-%d %H:%M')
if (not (herf in dbpages)):
pages.append([herf, lastpost_time])
elif Soc:
updatepag.append([herf, lastpost_time])
elif (lastpost_time != Post.objects.filter(post_url=herf).values()[0]['lastupdate_datetime']):
updatepag.append([herf, lastpost_time])
else:
pass
if (deep == (len(pages) + len(updatepag))):
break
if (deep > (len(pages) + len(updatepag))):
while True:
mess = '#fd_page_top > div > a.nxt'
ck = browser.find_element_by_css_selector(mess)
browser.execute_script('arguments[0].scrollIntoView(false);', ck)
browser.execute_script('arguments[0].click()', ck)
WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="threadlisttableid"]')))
browser.execute_script('window.scrollTo(0,document.body.scrollHeight)')
time.sleep(0.5)
soup_level1 = BeautifulSoup(browser.page_source, 'lxml')
tbs = soup_level1.find_all('tbody', attrs={'id': re.compile('^normalthread*'), 'class': 'threadrow'})
for tb in tbs:
herf = tb.find('a', class_='s xst').get('href')
lastpost_time = tb.find('a', class_='lastpost_time').text
lastpost_time = datetime.datetime.strptime(lastpost_time, '%Y-%m-%d %H:%M')
if (not (herf in dbpages)):
pages.append([herf, lastpost_time])
elif Soc:
updatepag.append([herf, lastpost_time])
elif (lastpost_time != Post.objects.filter(post_url=herf).values()[0]['lastupdate_datetime']):
updatepag.append([herf, lastpost_time])
else:
pass
if (deep == (len(pages) + len(updatepag))):
break
if (deep == (len(pages) + len(updatepag))):
break
print('获取了{}个新的链接'.format(len(pages)))
print('新链接数:', len(pages), '\t 更新的链接数', len(updatepag))
for post_url_time in pages:
browser = divs(post_url_time[0], post_url_time[1], url, browser)
print('抓取了{}个版块的数据,开始更新数据,将有{}个版块被更新'.format(len(pages), len(updatepag)))
for upurltime in updatepag:
browser = update_data(upurltime[0], upurltime[1], browser)
browser.quit()
else:
print('请检查你的定位元素是否准确')
|
External entry point for the whole project
|
daemon/ck101_daemon.py
|
job
|
Odland/crawler_selenium
| 0 |
python
|
def job(deep, genre='https://ck101.com/forum.php?mod=forumdisplay&fid=3551&page=3', browser=webdriver.Chrome(options=chrome_options)):
browser.maximize_window()
try:
browser.get(genre)
p = WebDriverWait(browser, 3).until(EC.presence_of_element_located((By.XPATH, '//*[@id="ct"]/div/div[4]/div[2]/div/div[6]/span[3]/a')))
if (p.get_attribute('class') == 'chked'):
genre = p.get_attribute('href')
except:
pass
url = genre
browser.get(url)
elem = WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="threadlisttableid"]')))
if elem:
browser.execute_script('window.scrollTo(0,document.body.scrollHeight)')
print('程序开始运行...')
time.sleep(1)
soup_level1 = BeautifulSoup(browser.page_source, 'lxml')
pages = []
dbpages = set()
for i in Post.objects.all():
dbpages.add(i.post_url)
print('数据库中有{}个版块链接'.format(len(dbpages)))
tbs = soup_level1.find_all('tbody', attrs={'id': re.compile('^normalthread*'), 'class': 'threadrow'})
updatepag = []
for tb in tbs:
herf = tb.find('a', class_='s xst').get('href')
lastpost_time = tb.find('a', class_='lastpost_time').text
lastpost_time = datetime.datetime.strptime(lastpost_time, '%Y-%m-%d %H:%M')
if (not (herf in dbpages)):
pages.append([herf, lastpost_time])
elif Soc:
updatepag.append([herf, lastpost_time])
elif (lastpost_time != Post.objects.filter(post_url=herf).values()[0]['lastupdate_datetime']):
updatepag.append([herf, lastpost_time])
else:
pass
if (deep == (len(pages) + len(updatepag))):
break
if (deep > (len(pages) + len(updatepag))):
while True:
mess = '#fd_page_top > div > a.nxt'
ck = browser.find_element_by_css_selector(mess)
browser.execute_script('arguments[0].scrollIntoView(false);', ck)
browser.execute_script('arguments[0].click()', ck)
WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="threadlisttableid"]')))
browser.execute_script('window.scrollTo(0,document.body.scrollHeight)')
time.sleep(0.5)
soup_level1 = BeautifulSoup(browser.page_source, 'lxml')
tbs = soup_level1.find_all('tbody', attrs={'id': re.compile('^normalthread*'), 'class': 'threadrow'})
for tb in tbs:
herf = tb.find('a', class_='s xst').get('href')
lastpost_time = tb.find('a', class_='lastpost_time').text
lastpost_time = datetime.datetime.strptime(lastpost_time, '%Y-%m-%d %H:%M')
if (not (herf in dbpages)):
pages.append([herf, lastpost_time])
elif Soc:
updatepag.append([herf, lastpost_time])
elif (lastpost_time != Post.objects.filter(post_url=herf).values()[0]['lastupdate_datetime']):
updatepag.append([herf, lastpost_time])
else:
pass
if (deep == (len(pages) + len(updatepag))):
break
if (deep == (len(pages) + len(updatepag))):
break
print('获取了{}个新的链接'.format(len(pages)))
print('新链接数:', len(pages), '\t 更新的链接数', len(updatepag))
for post_url_time in pages:
browser = divs(post_url_time[0], post_url_time[1], url, browser)
print('抓取了{}个版块的数据,开始更新数据,将有{}个版块被更新'.format(len(pages), len(updatepag)))
for upurltime in updatepag:
browser = update_data(upurltime[0], upurltime[1], browser)
browser.quit()
else:
print('请检查你的定位元素是否准确')
|
def job(deep, genre='https://ck101.com/forum.php?mod=forumdisplay&fid=3551&page=3', browser=webdriver.Chrome(options=chrome_options)):
browser.maximize_window()
try:
browser.get(genre)
p = WebDriverWait(browser, 3).until(EC.presence_of_element_located((By.XPATH, '//*[@id="ct"]/div/div[4]/div[2]/div/div[6]/span[3]/a')))
if (p.get_attribute('class') == 'chked'):
genre = p.get_attribute('href')
except:
pass
url = genre
browser.get(url)
elem = WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="threadlisttableid"]')))
if elem:
browser.execute_script('window.scrollTo(0,document.body.scrollHeight)')
print('程序开始运行...')
time.sleep(1)
soup_level1 = BeautifulSoup(browser.page_source, 'lxml')
pages = []
dbpages = set()
for i in Post.objects.all():
dbpages.add(i.post_url)
print('数据库中有{}个版块链接'.format(len(dbpages)))
tbs = soup_level1.find_all('tbody', attrs={'id': re.compile('^normalthread*'), 'class': 'threadrow'})
updatepag = []
for tb in tbs:
herf = tb.find('a', class_='s xst').get('href')
lastpost_time = tb.find('a', class_='lastpost_time').text
lastpost_time = datetime.datetime.strptime(lastpost_time, '%Y-%m-%d %H:%M')
if (not (herf in dbpages)):
pages.append([herf, lastpost_time])
elif Soc:
updatepag.append([herf, lastpost_time])
elif (lastpost_time != Post.objects.filter(post_url=herf).values()[0]['lastupdate_datetime']):
updatepag.append([herf, lastpost_time])
else:
pass
if (deep == (len(pages) + len(updatepag))):
break
if (deep > (len(pages) + len(updatepag))):
while True:
mess = '#fd_page_top > div > a.nxt'
ck = browser.find_element_by_css_selector(mess)
browser.execute_script('arguments[0].scrollIntoView(false);', ck)
browser.execute_script('arguments[0].click()', ck)
WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="threadlisttableid"]')))
browser.execute_script('window.scrollTo(0,document.body.scrollHeight)')
time.sleep(0.5)
soup_level1 = BeautifulSoup(browser.page_source, 'lxml')
tbs = soup_level1.find_all('tbody', attrs={'id': re.compile('^normalthread*'), 'class': 'threadrow'})
for tb in tbs:
herf = tb.find('a', class_='s xst').get('href')
lastpost_time = tb.find('a', class_='lastpost_time').text
lastpost_time = datetime.datetime.strptime(lastpost_time, '%Y-%m-%d %H:%M')
if (not (herf in dbpages)):
pages.append([herf, lastpost_time])
elif Soc:
updatepag.append([herf, lastpost_time])
elif (lastpost_time != Post.objects.filter(post_url=herf).values()[0]['lastupdate_datetime']):
updatepag.append([herf, lastpost_time])
else:
pass
if (deep == (len(pages) + len(updatepag))):
break
if (deep == (len(pages) + len(updatepag))):
break
print('获取了{}个新的链接'.format(len(pages)))
print('新链接数:', len(pages), '\t 更新的链接数', len(updatepag))
for post_url_time in pages:
browser = divs(post_url_time[0], post_url_time[1], url, browser)
print('抓取了{}个版块的数据,开始更新数据,将有{}个版块被更新'.format(len(pages), len(updatepag)))
for upurltime in updatepag:
browser = update_data(upurltime[0], upurltime[1], browser)
browser.quit()
else:
print('请检查你的定位元素是否准确')<|docstring|>External entry point for the whole project<|endoftext|>
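A minimal driver sketch for the `job` entry point above. It assumes the file is importable as `daemon.ck101_daemon` and that the project's Django settings are already configured; the depth value is illustrative only. Passing an explicit driver also sidesteps the default `browser` argument, which creates its own Chrome instance as soon as the module is imported.

```python
# Illustrative only: drive the crawler with an explicitly created browser.
from selenium import webdriver

from daemon.ck101_daemon import chrome_options, job  # assumed module path

driver = webdriver.Chrome(options=chrome_options)
job(deep=5, browser=driver)  # crawl at most 5 new or updated threads
```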
|
a70684faee265e66df3636a8b4788d4f1c991b40bd0a3c0ba1cefcf0ac46fbf2
|
@when('bigtop.available', 'hadoop.hdfs.ready', 'zookeeper.ready')
def install_hbase(hdfs, zk):
'\n Anytime our dependencies are available, check to see if we have a valid\n reason to (re)install. These include:\n - initial install\n - config change\n - Zookeeper unit has joined/departed\n '
zks = zk.zookeepers()
deployment_matrix = {'zookeepers': zks}
if (not is_state('hbase.installed')):
prefix = 'installing'
data_changed('deployment_matrix', deployment_matrix)
else:
prefix = 'configuring'
if (is_state('hbpeer.departed') or is_state('hbpeer.joined')):
return
if (not (is_state('config.changed') or data_changed('deployment_matrix', deployment_matrix))):
return
hookenv.status_set('maintenance', '{} hbase'.format(prefix))
hookenv.log('{} hbase with: {}'.format(prefix, deployment_matrix))
hbase = HBase()
hosts = {}
hosts['namenode'] = hdfs.namenodes()[0]
hbase.configure(hosts, zks)
hbase.update_regionservers([hookenv.unit_private_ip()])
if any_file_changed(['/etc/hbase/conf/regionservers']):
hbase.restart()
hbase_version = (get_package_version('hbase-master') or 'unknown')
hookenv.application_version_set(hbase_version)
hbase.open_ports()
report_status()
set_state('hbase.installed')
|
Anytime our dependencies are available, check to see if we have a valid
reason to (re)install. These include:
- initial install
- config change
- Zookeeper unit has joined/departed
|
bigtop-packages/src/charm/hbase/layer-hbase/reactive/hbase.py
|
install_hbase
|
yamasakisua/bigtop-private
| 371 |
python
|
@when('bigtop.available', 'hadoop.hdfs.ready', 'zookeeper.ready')
def install_hbase(hdfs, zk):
'\n Anytime our dependencies are available, check to see if we have a valid\n reason to (re)install. These include:\n - initial install\n - config change\n - Zookeeper unit has joined/departed\n '
zks = zk.zookeepers()
deployment_matrix = {'zookeepers': zks}
if (not is_state('hbase.installed')):
prefix = 'installing'
data_changed('deployment_matrix', deployment_matrix)
else:
prefix = 'configuring'
if (is_state('hbpeer.departed') or is_state('hbpeer.joined')):
return
if (not (is_state('config.changed') or data_changed('deployment_matrix', deployment_matrix))):
return
hookenv.status_set('maintenance', '{} hbase'.format(prefix))
hookenv.log('{} hbase with: {}'.format(prefix, deployment_matrix))
hbase = HBase()
hosts = {}
hosts['namenode'] = hdfs.namenodes()[0]
hbase.configure(hosts, zks)
hbase.update_regionservers([hookenv.unit_private_ip()])
if any_file_changed(['/etc/hbase/conf/regionservers']):
hbase.restart()
hbase_version = (get_package_version('hbase-master') or 'unknown')
hookenv.application_version_set(hbase_version)
hbase.open_ports()
report_status()
set_state('hbase.installed')
|
@when('bigtop.available', 'hadoop.hdfs.ready', 'zookeeper.ready')
def install_hbase(hdfs, zk):
'\n Anytime our dependencies are available, check to see if we have a valid\n reason to (re)install. These include:\n - initial install\n - config change\n - Zookeeper unit has joined/departed\n '
zks = zk.zookeepers()
deployment_matrix = {'zookeepers': zks}
if (not is_state('hbase.installed')):
prefix = 'installing'
data_changed('deployment_matrix', deployment_matrix)
else:
prefix = 'configuring'
if (is_state('hbpeer.departed') or is_state('hbpeer.joined')):
return
if (not (is_state('config.changed') or data_changed('deployment_matrix', deployment_matrix))):
return
hookenv.status_set('maintenance', '{} hbase'.format(prefix))
hookenv.log('{} hbase with: {}'.format(prefix, deployment_matrix))
hbase = HBase()
hosts = {}
hosts['namenode'] = hdfs.namenodes()[0]
hbase.configure(hosts, zks)
hbase.update_regionservers([hookenv.unit_private_ip()])
if any_file_changed(['/etc/hbase/conf/regionservers']):
hbase.restart()
hbase_version = (get_package_version('hbase-master') or 'unknown')
hookenv.application_version_set(hbase_version)
hbase.open_ports()
report_status()
set_state('hbase.installed')<|docstring|>Anytime our dependencies are available, check to see if we have a valid
reason to (re)install. These include:
- initial install
- config change
- Zookeeper unit has joined/departed<|endoftext|>
|
d8b746f7fcb8483ac968e454813ad2cd71800600203d26464c15026cc4ca52d3
|
@when('hbase.installed')
@when_not_all('hadoop.hdfs.ready', 'zookeeper.ready')
def stop_hbase():
'\n HBase depends on HDFS and Zookeeper. If we are installed and either of\n these dependencies go away, shut down HBase services and remove our\n installed state.\n '
hbase = HBase()
hbase.close_ports()
hbase.stop()
remove_state('hbase.installed')
report_status()
|
HBase depends on HDFS and Zookeeper. If we are installed and either of
these dependencies go away, shut down HBase services and remove our
installed state.
|
bigtop-packages/src/charm/hbase/layer-hbase/reactive/hbase.py
|
stop_hbase
|
yamasakisua/bigtop-private
| 371 |
python
|
@when('hbase.installed')
@when_not_all('hadoop.hdfs.ready', 'zookeeper.ready')
def stop_hbase():
'\n HBase depends on HDFS and Zookeeper. If we are installed and either of\n these dependencies go away, shut down HBase services and remove our\n installed state.\n '
hbase = HBase()
hbase.close_ports()
hbase.stop()
remove_state('hbase.installed')
report_status()
|
@when('hbase.installed')
@when_not_all('hadoop.hdfs.ready', 'zookeeper.ready')
def stop_hbase():
'\n HBase depends on HDFS and Zookeeper. If we are installed and either of\n these dependencies go away, shut down HBase services and remove our\n installed state.\n '
hbase = HBase()
hbase.close_ports()
hbase.stop()
remove_state('hbase.installed')
report_status()<|docstring|>HBase depends on HDFS and Zookeeper. If we are installed and either of
these dependencies go away, shut down HBase services and remove our
installed state.<|endoftext|>
|
d456169cc862ff08e5f17eae5ae1bd1bf86c600dfae0778348388d827d9a0e2e
|
@when('hbase.installed')
@when_any('hbpeer.departed', 'hbpeer.joined')
def handle_peers():
"\n We use HBase peers to keep track of the RegionServer IP addresses in a\n cluster. Use get_nodes() from the appropriate peer relation to retrieve\n a list of peer tuples, e.g.:\n [('hbase/0', '172.31.5.161'), ('hbase/2', '172.31.5.11')]\n\n Depending on the state, this handler will add or remove peer IP addresses\n from the regionservers config file.\n "
if is_state('hbpeer.departed'):
hbpeer = RelationBase.from_state('hbpeer.departed')
is_departing = True
message = 'removing hbase peer(s)'
else:
hbpeer = RelationBase.from_state('hbpeer.joined')
is_departing = False
message = 'adding hbase peer(s)'
if hbpeer:
nodes = hbpeer.get_nodes()
else:
hookenv.log('Ignoring unknown HBase peer state')
return
hookenv.status_set('maintenance', message)
hbase = HBase()
ip_addrs = [node[1] for node in nodes]
hookenv.log('{}: {}'.format(message, ip_addrs))
hbase.update_regionservers(ip_addrs, remove=is_departing)
if any_file_changed(['/etc/hbase/conf/regionservers']):
hbase.restart()
if is_departing:
hbpeer.dismiss_departed()
else:
hbpeer.dismiss_joined()
report_status()
|
We use HBase peers to keep track of the RegionServer IP addresses in a
cluster. Use get_nodes() from the appropriate peer relation to retrieve
a list of peer tuples, e.g.:
[('hbase/0', '172.31.5.161'), ('hbase/2', '172.31.5.11')]
Depending on the state, this handler will add or remove peer IP addresses
from the regionservers config file.
|
bigtop-packages/src/charm/hbase/layer-hbase/reactive/hbase.py
|
handle_peers
|
yamasakisua/bigtop-private
| 371 |
python
|
@when('hbase.installed')
@when_any('hbpeer.departed', 'hbpeer.joined')
def handle_peers():
"\n We use HBase peers to keep track of the RegionServer IP addresses in a\n cluster. Use get_nodes() from the appropriate peer relation to retrieve\n a list of peer tuples, e.g.:\n [('hbase/0', '172.31.5.161'), ('hbase/2', '172.31.5.11')]\n\n Depending on the state, this handler will add or remove peer IP addresses\n from the regionservers config file.\n "
if is_state('hbpeer.departed'):
hbpeer = RelationBase.from_state('hbpeer.departed')
is_departing = True
message = 'removing hbase peer(s)'
else:
hbpeer = RelationBase.from_state('hbpeer.joined')
is_departing = False
message = 'adding hbase peer(s)'
if hbpeer:
nodes = hbpeer.get_nodes()
else:
hookenv.log('Ignoring unknown HBase peer state')
return
hookenv.status_set('maintenance', message)
hbase = HBase()
ip_addrs = [node[1] for node in nodes]
hookenv.log('{}: {}'.format(message, ip_addrs))
hbase.update_regionservers(ip_addrs, remove=is_departing)
if any_file_changed(['/etc/hbase/conf/regionservers']):
hbase.restart()
if is_departing:
hbpeer.dismiss_departed()
else:
hbpeer.dismiss_joined()
report_status()
|
@when('hbase.installed')
@when_any('hbpeer.departed', 'hbpeer.joined')
def handle_peers():
"\n We use HBase peers to keep track of the RegionServer IP addresses in a\n cluster. Use get_nodes() from the appropriate peer relation to retrieve\n a list of peer tuples, e.g.:\n [('hbase/0', '172.31.5.161'), ('hbase/2', '172.31.5.11')]\n\n Depending on the state, this handler will add or remove peer IP addresses\n from the regionservers config file.\n "
if is_state('hbpeer.departed'):
hbpeer = RelationBase.from_state('hbpeer.departed')
is_departing = True
message = 'removing hbase peer(s)'
else:
hbpeer = RelationBase.from_state('hbpeer.joined')
is_departing = False
message = 'adding hbase peer(s)'
if hbpeer:
nodes = hbpeer.get_nodes()
else:
hookenv.log('Ignoring unknown HBase peer state')
return
hookenv.status_set('maintenance', message)
hbase = HBase()
ip_addrs = [node[1] for node in nodes]
hookenv.log('{}: {}'.format(message, ip_addrs))
hbase.update_regionservers(ip_addrs, remove=is_departing)
if any_file_changed(['/etc/hbase/conf/regionservers']):
hbase.restart()
if is_departing:
hbpeer.dismiss_departed()
else:
hbpeer.dismiss_joined()
report_status()<|docstring|>We use HBase peers to keep track of the RegionServer IP addresses in a
cluster. Use get_nodes() from the appropriate peer relation to retrieve
a list of peer tuples, e.g.:
[('hbase/0', '172.31.5.161'), ('hbase/2', '172.31.5.11')]
Depending on the state, this handler will add or remove peer IP addresses
from the regionservers config file.<|endoftext|>
|
559f3a34bea9ecbc1692f5b02e606eed860776a8226709a18952c0693301ab8a
|
@when('hbase.installed', 'leadership.is_leader')
@when('zookeeper.ready', 'hbclient.joined')
def serve_client(zk, client):
"\n We may have multiple HBase peers, but we only need to send 1 set of\n connection data. Leverage Juju leadership to only send the leader\n info (even if it's not the actual HBase master).\n\n Zookeeper will ensure that any HBase peer routes requests to the\n appropriate master.\n "
hbase = HBase()
config = get_layer_opts()
host = hookenv.unit_private_ip()
master_port = config.port('hbase-master')
regionserver_port = config.port('hbase-region')
thrift_port = config.port('hbase-thrift')
zk_connect = hbase.get_zk_connect(zk.zookeepers())
client.send_connection(master_port=master_port, regionserver_port=regionserver_port, thrift_port=thrift_port, host=host, zk_connect=zk_connect)
hookenv.log('Serving HBase client with master {}:{}, regionserver port {}, thrift port {}, and zk connect {}'.format(host, master_port, regionserver_port, thrift_port, zk_connect))
|
We may have multiple HBase peers, but we only need to send 1 set of
connection data. Leverage Juju leadership to only send the leader
info (even if it's not the actual HBase master).
Zookeeper will ensure that any HBase peer routes requests to the
appropriate master.
|
bigtop-packages/src/charm/hbase/layer-hbase/reactive/hbase.py
|
serve_client
|
yamasakisua/bigtop-private
| 371 |
python
|
@when('hbase.installed', 'leadership.is_leader')
@when('zookeeper.ready', 'hbclient.joined')
def serve_client(zk, client):
"\n We may have multiple HBase peers, but we only need to send 1 set of\n connection data. Leverage Juju leadership to only send the leader\n info (even if it's not the actual HBase master).\n\n Zookeeper will ensure that any HBase peer routes requests to the\n appropriate master.\n "
hbase = HBase()
config = get_layer_opts()
host = hookenv.unit_private_ip()
master_port = config.port('hbase-master')
regionserver_port = config.port('hbase-region')
thrift_port = config.port('hbase-thrift')
zk_connect = hbase.get_zk_connect(zk.zookeepers())
client.send_connection(master_port=master_port, regionserver_port=regionserver_port, thrift_port=thrift_port, host=host, zk_connect=zk_connect)
hookenv.log('Serving HBase client with master {}:{}, regionserver port {}, thrift port {}, and zk connect {}'.format(host, master_port, regionserver_port, thrift_port, zk_connect))
|
@when('hbase.installed', 'leadership.is_leader')
@when('zookeeper.ready', 'hbclient.joined')
def serve_client(zk, client):
"\n We may have multiple HBase peers, but we only need to send 1 set of\n connection data. Leverage Juju leadership to only send the leader\n info (even if it's not the actual HBase master).\n\n Zookeeper will ensure that any HBase peer routes requests to the\n appropriate master.\n "
hbase = HBase()
config = get_layer_opts()
host = hookenv.unit_private_ip()
master_port = config.port('hbase-master')
regionserver_port = config.port('hbase-region')
thrift_port = config.port('hbase-thrift')
zk_connect = hbase.get_zk_connect(zk.zookeepers())
client.send_connection(master_port=master_port, regionserver_port=regionserver_port, thrift_port=thrift_port, host=host, zk_connect=zk_connect)
hookenv.log('Serving HBase client with master {}:{}, regionserver port {}, thrift port {}, and zk connect {}'.format(host, master_port, regionserver_port, thrift_port, zk_connect))<|docstring|>We may have multiple HBase peers, but we only need to send 1 set of
connection data. Leverage Juju leadership to only send the leader
info (even if it's not the actual HBase master).
Zookeeper will ensure that any HBase peer routes requests to the
appropriate master.<|endoftext|>
|
396edf28f7e8f41de362f9eef3c9b569c9431ef61b7ca6ba0517d5793404f4f9
|
@when('leadership.is_leader', 'hbclient.joined')
@when_not('hbase.installed')
def stop_serving_client(client):
"\n If HDFS or ZK goes away, the 'installed' state will be removed. If we have\n connected clients, inform them that hbase is no longer ready.\n "
client.clear_hbase_started()
|
If HDFS or ZK goes away, the 'installed' state will be removed. If we have
connected clients, inform them that hbase is no longer ready.
|
bigtop-packages/src/charm/hbase/layer-hbase/reactive/hbase.py
|
stop_serving_client
|
yamasakisua/bigtop-private
| 371 |
python
|
@when('leadership.is_leader', 'hbclient.joined')
@when_not('hbase.installed')
def stop_serving_client(client):
"\n If HDFS or ZK goes away, the 'installed' state will be removed. If we have\n connected clients, inform them that hbase is no longer ready.\n "
client.clear_hbase_started()
|
@when('leadership.is_leader', 'hbclient.joined')
@when_not('hbase.installed')
def stop_serving_client(client):
"\n If HDFS or ZK goes away, the 'installed' state will be removed. If we have\n connected clients, inform them that hbase is no longer ready.\n "
client.clear_hbase_started()<|docstring|>If HDFS or ZK goes away, the 'installed' state will be removed. If we have
connected clients, inform them that hbase is no longer ready.<|endoftext|>
|
6472f0c89f8e55b3fc3aa0a3e5294b2296868fa40c820cae337292a70ee2e468
|
def __init__(self, intervals: Optional[List[BinningInterval]]=None):
'\n Args:\n intervals: intervals for binning. Defaults to default_intervals.\n '
if (intervals is None):
intervals = default_intervals
self.limits = QuantityBinningLimits(intervals)
self.temperature_extractor = TemperatureExtractor()
self.quantity_binner = QuantityBinner(self.limits.get_boundaries())
self.placeholder_handler = PlaceholderHandler.for_temperatures()
|
Args:
intervals: intervals for binning. Defaults to default_intervals.
|
smiles2actions/quantities/temperature_placeholder.py
|
__init__
|
rxn4chemistry/smiles2actions
| 8 |
python
|
def __init__(self, intervals: Optional[List[BinningInterval]]=None):
'\n Args:\n intervals: intervals for binning. Defaults to default_intervals.\n '
if (intervals is None):
intervals = default_intervals
self.limits = QuantityBinningLimits(intervals)
self.temperature_extractor = TemperatureExtractor()
self.quantity_binner = QuantityBinner(self.limits.get_boundaries())
self.placeholder_handler = PlaceholderHandler.for_temperatures()
|
def __init__(self, intervals: Optional[List[BinningInterval]]=None):
'\n Args:\n intervals: intervals for binning. Defaults to default_intervals.\n '
if (intervals is None):
intervals = default_intervals
self.limits = QuantityBinningLimits(intervals)
self.temperature_extractor = TemperatureExtractor()
self.quantity_binner = QuantityBinner(self.limits.get_boundaries())
self.placeholder_handler = PlaceholderHandler.for_temperatures()<|docstring|>Args:
intervals: intervals for binning. Defaults to default_intervals.<|endoftext|>
|
74d229e6f0cde4ef5e55b14a854e573ecbc81c1d072bf3b880d442ed27e46d25
|
def register_plugin(plugin):
'Registers a plugin into cortex\n\n Args:\n plugin: TODO\n\n Returns:\n\n '
if issubclass(plugin, ModelPlugin):
register_model(plugin)
elif issubclass(plugin, DatasetPlugin):
register_data(plugin)
else:
raise ValueError(plugin)
|
Registers a plugin into cortex
Args:
plugin: TODO
Returns:
|
cortex/plugins.py
|
register_plugin
|
bbradt/cortex
| 109 |
python
|
def register_plugin(plugin):
'Registers a plugin into cortex\n\n Args:\n plugin: TODO\n\n Returns:\n\n '
if issubclass(plugin, ModelPlugin):
register_model(plugin)
elif issubclass(plugin, DatasetPlugin):
register_data(plugin)
else:
raise ValueError(plugin)
|
def register_plugin(plugin):
'Registers a plugin into cortex\n\n Args:\n plugin: TODO\n\n Returns:\n\n '
if issubclass(plugin, ModelPlugin):
register_model(plugin)
elif issubclass(plugin, DatasetPlugin):
register_data(plugin)
else:
raise ValueError(plugin)<|docstring|>Registers a plugin into cortex
Args:
plugin: TODO
Returns:<|endoftext|>
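A hedged sketch of how `register_plugin` above is typically driven; the import path and the toy subclass are assumptions for illustration, not part of the source.

```python
# Illustrative only: registering a custom model plugin.
from cortex.plugins import ModelPlugin, register_plugin  # assumed import path

class MyClassifier(ModelPlugin):
    """Toy plugin; the bodies below are placeholders."""

    def build(self, *args, **kwargs):
        pass  # construct networks here

    def routine(self, *args, **kwargs):
        pass  # compute losses and results here

# Dispatch follows issubclass: ModelPlugin subclasses go to register_model,
# DatasetPlugin subclasses go to register_data, anything else raises ValueError.
register_plugin(MyClassifier)
```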
|
276c542cd9406b34b90f39920fa7a5bea3ded7af7ff6693426fdcc289e8add73
|
def copy_to_local_path(self, from_path: str) -> str:
' Copies data to a local path.\n\n Path is set in the .cortex.yml file. This can be set up through\n `cortex setup`.\n\n Args:\n from_path: The path to the data to be copied.\n\n '
if from_path.endswith('/'):
from_path = from_path[:(- 1)]
basename = path.basename(from_path)
local_path = CONFIG.data_paths.get('local')
if (local_path is None):
raise KeyError('`{}` not found in {} data_paths'.format(local_path, _config_name))
to_path = path.join(local_path, basename)
if ((not path.exists(to_path)) and path.exists(from_path)):
logger.info('Copying dataset {} from {} to {} directory.... (This may take time)'.format(self.__class__.__name__, from_path, to_path))
if path.isdir(from_path):
shutil.copytree(from_path, to_path)
else:
shutil.copy(from_path, local_path)
logger.info('Finished copying.')
return to_path
|
Copies data to a local path.
Path is set in the .cortex.yml file. This can be set up through
`cortex setup`.
Args:
from_path: The path to the data to be copied.
|
cortex/plugins.py
|
copy_to_local_path
|
bbradt/cortex
| 109 |
python
|
def copy_to_local_path(self, from_path: str) -> str:
' Copies data to a local path.\n\n Path is set in the .cortex.yml file. This can be set up through\n `cortex setup`.\n\n Args:\n from_path: The path to the data to be copied.\n\n '
if from_path.endswith('/'):
from_path = from_path[:(- 1)]
basename = path.basename(from_path)
local_path = CONFIG.data_paths.get('local')
if (local_path is None):
raise KeyError('`{}` not found in {} data_paths'.format(local_path, _config_name))
to_path = path.join(local_path, basename)
if ((not path.exists(to_path)) and path.exists(from_path)):
logger.info('Copying dataset {} from {} to {} directory.... (This may take time)'.format(self.__class__.__name__, from_path, to_path))
if path.isdir(from_path):
shutil.copytree(from_path, to_path)
else:
shutil.copy(from_path, local_path)
logger.info('Finished copying.')
return to_path
|
def copy_to_local_path(self, from_path: str) -> str:
' Copies data to a local path.\n\n Path is set in the .cortex.yml file. This can be set up through\n `cortex setup`.\n\n Args:\n from_path: The path to the data to be copied.\n\n '
if from_path.endswith('/'):
from_path = from_path[:(- 1)]
basename = path.basename(from_path)
local_path = CONFIG.data_paths.get('local')
if (local_path is None):
raise KeyError('`{}` not found in {} data_paths'.format(local_path, _config_name))
to_path = path.join(local_path, basename)
if ((not path.exists(to_path)) and path.exists(from_path)):
logger.info('Copying dataset {} from {} to {} directory.... (This may take time)'.format(self.__class__.__name__, from_path, to_path))
if path.isdir(from_path):
shutil.copytree(from_path, to_path)
else:
shutil.copy(from_path, local_path)
logger.info('Finished copying.')
return to_path<|docstring|>Copies data to a local path.
Path is set in the .cortex.yml file. This can be set up through
`cortex setup`.
Args:
from_path: The path to the data to be copied.<|endoftext|>
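The copy logic above amounts to a generic "copy a file or directory tree, skipping existing targets" pattern. A standalone sketch of the same pattern, with cortex's CONFIG lookup replaced by an explicit destination argument (the helper name is hypothetical):

```python
# Illustrative only: copy a file or directory into dst_dir unless it already exists.
import shutil
from os import path

def copy_into(from_path: str, dst_dir: str) -> str:
    if from_path.endswith('/'):
        from_path = from_path[:-1]
    to_path = path.join(dst_dir, path.basename(from_path))
    if not path.exists(to_path) and path.exists(from_path):
        if path.isdir(from_path):
            shutil.copytree(from_path, to_path)   # whole directory tree
        else:
            shutil.copy(from_path, dst_dir)       # single file
    return to_path
```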
|
21c4151174a65e8dc62a24e3355db558507b2547386e826c9b045138f2224a3c
|
def add_dataset(self, mode: str, dataset: Dataset):
'Adds a dataset to the plugin.\n\n Any dataset added in this way will be used in the training or testing\n loops, depending on the mode specified.\n\n Args:\n mode: The data mode that this dataset will be run on.\n `train` and `test` are highly recommended.\n dataset: The dataset object.\n\n '
if (mode in self._datasets):
raise KeyError('`{}` already added to datasets in entrypoint'.format(mode))
self._datasets[mode] = dataset
|
Adds a dataset to the plugin.
Any dataset added in this way will be used in the training or testing
loops, depending on the mode specified.
Args:
mode: The data mode that this dataset will be run on.
`train` and `test` are highly recommended.
dataset: The dataset object.
|
cortex/plugins.py
|
add_dataset
|
bbradt/cortex
| 109 |
python
|
def add_dataset(self, mode: str, dataset: Dataset):
'Adds a dataset to the plugin.\n\n Any dataset added in this way will be used in the training or testing\n loops, depending on the mode specified.\n\n Args:\n mode: The data mode that this dataset will be run on.\n `train` and `test` are highly recommended.\n dataset: The dataset object.\n\n '
if (mode in self._datasets):
raise KeyError('`{}` already added to datasets in entrypoint'.format(mode))
self._datasets[mode] = dataset
|
def add_dataset(self, mode: str, dataset: Dataset):
'Adds a dataset to the plugin.\n\n Any dataset added in this way will be used in the training or testing\n loops, depending on the mode specified.\n\n Args:\n mode: The data mode that this dataset will be run on.\n `train` and `test` are highly recommended.\n dataset: The dataset object.\n\n '
if (mode in self._datasets):
raise KeyError('`{}` already added to datasets in entrypoint'.format(mode))
self._datasets[mode] = dataset<|docstring|>Adds a dataset to the plugin.
Any dataset added in this way will be used in the training or testing
loops, depending on the mode specified.
Args:
mode: The data mode that this dataset will be run on.
`train` and `test` are highly recommended.
dataset: The dataset object.<|endoftext|>
|
53c0d5cb88d564ba05961af1e30a2dc7849bd37e88a261b5c4860666d548b89d
|
def set_dataloader_class(self, dataloader_class: type):
"Set dataloader class.\n\n It will be used instead of :class:`torch.utils.data.DataLoader`.\n\n Args:\n dataloader_class: custom data loader class\n\n Notes:\n This method can be used to pass custom collate function, pass\n arguments to the dataloader or use a completely custom loader.\n\n Example:\n ```\n class MyData(DatasetPlugin):\n sources = ['MyData']\n\n def handle(self, source, copy_to_local=False, normalize=True,\n tanh_normalization=False, **transform_args):\n train_set = ...\n\n def collate(batch):\n collated_batch = ...\n return collated_batch\n\n NewDataLoader = partial(DataLoader, collate_fn=collate)\n self.add_dataset('train', train_set)\n self.set_dataloader_class(dataloader_class=NewDataLoader)\n ```\n\n "
self._dataloader_class = dataloader_class
|
Set dataloader class.
It will be used instead of :class:`torch.utils.data.DataLoader`.
Args:
dataloader_class: custom data loader class
Notes:
This method can be used to pass custom collate function, pass
arguments to the dataloader or use a completely custom loader.
Example:
```
class MyData(DatasetPlugin):
    sources = ['MyData']

    def handle(self, source, copy_to_local=False, normalize=True,
               tanh_normalization=False, **transform_args):
        train_set = ...

        def collate(batch):
            collated_batch = ...
            return collated_batch

        NewDataLoader = partial(DataLoader, collate_fn=collate)
        self.add_dataset('train', train_set)
        self.set_dataloader_class(dataloader_class=NewDataLoader)
```
|
cortex/plugins.py
|
set_dataloader_class
|
bbradt/cortex
| 109 |
python
|
def set_dataloader_class(self, dataloader_class: type):
"Set dataloader class.\n\n It will be used instead of :class:`torch.utils.data.DataLoader`.\n\n Args:\n dataloader_class: custom data loader class\n\n Notes:\n This method can be used to pass custom collate function, pass\n arguments to the dataloader or use a completely custom loader.\n\n Example:\n ```\n class MyData(DatasetPlugin):\n sources = ['MyData']\n\n def handle(self, source, copy_to_local=False, normalize=True,\n tanh_normalization=False, **transform_args):\n train_set = ...\n\n def collate(batch):\n collated_batch = ...\n return collated_batch\n\n NewDataLoader = partial(DataLoader, collate_fn=collate)\n self.add_dataset('train', train_set)\n self.set_dataloader_class(dataloader_class=NewDataLoader)\n ```\n\n "
self._dataloader_class = dataloader_class
|
def set_dataloader_class(self, dataloader_class: type):
"Set dataloader class.\n\n It will be used instead of :class:`torch.utils.data.DataLoader`.\n\n Args:\n dataloader_class: custom data loader class\n\n Notes:\n This method can be used to pass custom collate function, pass\n arguments to the dataloader or use a completely custom loader.\n\n Example:\n ```\n class MyData(DatasetPlugin):\n sources = ['MyData']\n\n def handle(self, source, copy_to_local=False, normalize=True,\n tanh_normalization=False, **transform_args):\n train_set = ...\n\n def collate(batch):\n collated_batch = ...\n return collated_batch\n\n NewDataLoader = partial(DataLoader, collate_fn=collate)\n self.add_dataset('train', train_set)\n self.set_dataloader_class(dataloader_class=NewDataLoader)\n ```\n\n "
self._dataloader_class = dataloader_class<|docstring|>Set dataloader class.
It will be used instead of :class:`torch.utils.data.DataLoader`.
Args:
dataloader_class: custom data loader class
Notes:
This method can be used to pass custom collate function, pass
arguments to the dataloader or use a completely custom loader.
Example:
```
class MyData(DatasetPlugin):
    sources = ['MyData']

    def handle(self, source, copy_to_local=False, normalize=True,
               tanh_normalization=False, **transform_args):
        train_set = ...

        def collate(batch):
            collated_batch = ...
            return collated_batch

        NewDataLoader = partial(DataLoader, collate_fn=collate)
        self.add_dataset('train', train_set)
        self.set_dataloader_class(dataloader_class=NewDataLoader)
```<|endoftext|>
|
c3f51bcc3c41f93f720b9df9786a06f678360deab4521afda97cb37e113f91e7
|
def get_path(self, source: str):
"Get's the path to a source.\n\n This is derived from config.yaml file.\n\n Args:\n source: str for the dataset source.\n\n Returns:\n The path to the dataset.\n\n "
p = CONFIG.data_paths.get(source)
if (p is None):
raise KeyError('`{}` not found in {} data_paths'.format(source, _config_name))
return p
|
Gets the path to a source.
This is derived from config.yaml file.
Args:
source: str for the dataset source.
Returns:
The path to the dataset.
|
cortex/plugins.py
|
get_path
|
bbradt/cortex
| 109 |
python
|
def get_path(self, source: str):
"Get's the path to a source.\n\n This is derived from config.yaml file.\n\n Args:\n source: str for the dataset source.\n\n Returns:\n The path to the dataset.\n\n "
p = CONFIG.data_paths.get(source)
if (p is None):
raise KeyError('`{}` not found in {} data_paths'.format(source, _config_name))
return p
|
def get_path(self, source: str):
"Get's the path to a source.\n\n This is derived from config.yaml file.\n\n Args:\n source: str for the dataset source.\n\n Returns:\n The path to the dataset.\n\n "
p = CONFIG.data_paths.get(source)
if (p is None):
raise KeyError('`{}` not found in {} data_paths'.format(source, _config_name))
return p<|docstring|>Gets the path to a source.
This is derived from config.yaml file.
Args:
source: str for the dataset source.
Returns:
The path to the dataset.<|endoftext|>
|
2d3a9889ddd9642fb955c4faa2df59a92efedf521adfc180aef678606b914004
|
def set_input_names(self, input_names):
'Sets the names of the elements of the dataset.\n\n For use downstream in models.\n\n Args:\n input_names (:obj:`list` of :obj:`str`): The input names.\n Should be the same size as the output of the dataset iterator.\n\n '
self._input_names = input_names
|
Sets the names of the elements of the dataset.
For use downstream in models.
Args:
input_names (:obj:`list` of :obj:`str`): The input names.
Should be the same size as the output of the dataset iterator.
|
cortex/plugins.py
|
set_input_names
|
bbradt/cortex
| 109 |
python
|
def set_input_names(self, input_names):
'Sets the names of the elements of the dataset.\n\n For use downstream in models.\n\n Args:\n input_names (:obj:`list` of :obj:`str`): The input names.\n Should be the same size as the output of the dataset iterator.\n\n '
self._input_names = input_names
|
def set_input_names(self, input_names):
'Sets the names of the elements of the dataset.\n\n For use downstream in models.\n\n Args:\n input_names (:obj:`list` of :obj:`str`): The input names.\n Should be the same size as the output of the dataset iterator.\n\n '
self._input_names = input_names<|docstring|>Sets the names of the elements of the dataset.
For use downstream in models.
Args:
input_names (:obj:`list` of :obj:`str`): The input names.
Should be the same size as the output of the dataset iterator.<|endoftext|>
|
78a16ba89ec6b781483e562f15c4f168e7b4b95f35a9289f2e8bd4296afbc095
|
def set_dims(self, **kwargs):
'Sets the dimensions of the data.\n\n Args:\n **kwargs: a dictionary of dimension keys and ints.\n\n '
for (k, v) in kwargs.items():
self._dims[k] = v
|
Sets the dimensions of the data.
Args:
**kwargs: a dictionary of dimension keys and ints.
|
cortex/plugins.py
|
set_dims
|
bbradt/cortex
| 109 |
python
|
def set_dims(self, **kwargs):
'Sets the dimensions of the data.\n\n Args:\n **kwargs: a dictionary of dimension keys and ints.\n\n '
for (k, v) in kwargs.items():
self._dims[k] = v
|
def set_dims(self, **kwargs):
'Sets the dimensions of the data.\n\n Args:\n **kwargs: a dictionary of dimension keys and ints.\n\n '
for (k, v) in kwargs.items():
self._dims[k] = v<|docstring|>Sets the dimensions of the data.
Args:
**kwargs: a dictionary of dimension keys and ints.<|endoftext|>
|
e747d4a4875ebecfa1fefdbc9fe0bb6e75ebd9d3a86ab848734a95a22a69f6b2
|
def set_scale(self, scale):
"Sets the min / max values for the data.\n\n Note:\n This will probably be removed. It doesn't even function right now.\n\n Args:\n scale (:obj:`tuple` of :obj:`float`): min/max pair.\n\n "
self._scale = scale
|
Sets the min / max values for the data.
Note:
This will probably be removed. It doesn't even function right now.
Args:
scale (:obj:`tuple` of :obj:`float`): min/max pair.
|
cortex/plugins.py
|
set_scale
|
bbradt/cortex
| 109 |
python
|
def set_scale(self, scale):
"Sets the min / max values for the data.\n\n Note:\n This will probably be removed. It doesn't even function right now.\n\n Args:\n scale (:obj:`tuple` of :obj:`float`): min/max pair.\n\n "
self._scale = scale
|
def set_scale(self, scale):
"Sets the min / max values for the data.\n\n Note:\n This will probably be removed. It doesn't even function right now.\n\n Args:\n scale (:obj:`tuple` of :obj:`float`): min/max pair.\n\n "
self._scale = scale<|docstring|>Sets the min / max values for the data.
Note:
This will probably be removed. It doesn't even function right now.
Args:
scale (:obj:`tuple` of :obj:`float`): min/max pair.<|endoftext|>
|
f3ccf087053ad59e462157d3a22e5e16f5d68102a2f82c34b8649f1ea6af24c8
|
def make_indexing(self, C):
'Makes an indexing dataset.\n\n Index comes in as the last element of the batch.\n\n Args:\n C: data.Dataset class.\n\n Returns:\n Wrapped data.Dataset class.\n\n '
class IndexingDataset(C):
def __getitem__(self, index):
output = super().__getitem__(index)
return (output + (index,))
return IndexingDataset
|
Makes an indexing dataset.
Index comes in as the last element of the batch.
Args:
C: data.Dataset class.
Returns:
Wrapped data.Dataset class.
|
cortex/plugins.py
|
make_indexing
|
bbradt/cortex
| 109 |
python
|
def make_indexing(self, C):
'Makes an indexing dataset.\n\n Index comes in as the last element of the batch.\n\n Args:\n C: data.Dataset class.\n\n Returns:\n Wrapped data.Dataset class.\n\n '
class IndexingDataset(C):
def __getitem__(self, index):
output = super().__getitem__(index)
return (output + (index,))
return IndexingDataset
|
def make_indexing(self, C):
'Makes an indexing dataset.\n\n Index comes in as the last element of the batch.\n\n Args:\n C: data.Dataset class.\n\n Returns:\n Wrapped data.Dataset class.\n\n '
class IndexingDataset(C):
def __getitem__(self, index):
output = super().__getitem__(index)
return (output + (index,))
return IndexingDataset<|docstring|>Makes an indexing dataset.
Index comes in as the last element of the batch.
Args:
C: data.Dataset class.
Returns:
Wrapped data.Dataset class.<|endoftext|>
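A self-contained illustration of the index-wrapping pattern used by `make_indexing` above, rewritten here as a free function; `ToyDataset` is hypothetical and stands in for a torch `data.Dataset`.

```python
# Illustrative only: the wrapper appends the sample index as the last element.
class ToyDataset:
    def __init__(self, values):
        self.values = values

    def __getitem__(self, index):
        return (self.values[index],)  # items are tuples, as in the plugin's datasets

    def __len__(self):
        return len(self.values)

def make_indexing(C):
    class IndexingDataset(C):
        def __getitem__(self, index):
            output = super().__getitem__(index)
            return output + (index,)
    return IndexingDataset

ds = make_indexing(ToyDataset)(['a', 'b', 'c'])
print(ds[1])  # ('b', 1): the original sample plus its index
```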
|
7778bc3b2037e362ec94633b03f265dd555bcdf1ecbed18c67eebc301a0ef2fe
|
def build(self, *args, **kwargs):
'Builds the neural networks.\n\n If the model is to build something, this needs to be overridden.\n\n Args:\n *args: Inputs to be passed to the function.\n **kwargs: Hyperparameters to be passed to the function\n\n '
raise NotImplementedError('`build` is not implemented for model class {}'.format(self.__class__.__name__))
|
Builds the neural networks.
If the model is to build something, this needs to be overridden.
Args:
*args: Inputs to be passed to the function.
**kwargs: Hyperparameters to be passed to the function
|
cortex/plugins.py
|
build
|
bbradt/cortex
| 109 |
python
|
def build(self, *args, **kwargs):
'Builds the neural networks.\n\n If the model is to build something, this needs to be overridden.\n\n Args:\n *args: Inputs to be passed to the function.\n **kwargs: Hyperparameters to be passed to the function\n\n '
raise NotImplementedError('`build` is not implemented for model class {}'.format(self.__class__.__name__))
|
def build(self, *args, **kwargs):
'Builds the neural networks.\n\n If the model is to build something, this needs to be overridden.\n\n Args:\n *args: Inputs to be passed to the function.\n **kwargs: Hyperparameters to be passed to the function\n\n '
raise NotImplementedError('`build` is not implemented for model class {}'.format(self.__class__.__name__))<|docstring|>Builds the neural networks.
If the model is to build something, this needs to be overridden.
Args:
*args: Inputs to be passed to the function.
**kwargs: Hyperparameters to be passed to the function<|endoftext|>
|
47cbe3d0bf303d2e55d89ca93b4dc197cab19871c4bb9e029d5815b25a1173f3
|
def routine(self, *args, **kwargs):
'Derives losses and results.\n\n If the model is to train something, this needs to be\n overridden.\n\n Args:\n *args: Inputs to be passed to the function.\n **kwargs: Hyperparameters to be passed to the function\n\n '
raise NotImplementedError('`routine` is not implemented for model class {}'.format(self.__class__.__name__))
|
Derives losses and results.
If the model is to train something, this needs to be
overridden.
Args:
*args: Inputs to be passed to the function.
**kwargs: Hyperparameters to be passed to the function
|
cortex/plugins.py
|
routine
|
bbradt/cortex
| 109 |
python
|
def routine(self, *args, **kwargs):
'Derives losses and results.\n\n If the model is to train something, this needs to be\n overridden.\n\n Args:\n *args: Inputs to be passed to the function.\n **kwargs: Hyperparameters to be passed to the function\n\n '
raise NotImplementedError('`routine` is not implemented for model class {}'.format(self.__class__.__name__))
|
def routine(self, *args, **kwargs):
'Derives losses and results.\n\n If the model is to train something, this needs to be\n overridden.\n\n Args:\n *args: Inputs to be passed to the function.\n **kwargs: Hyperparameters to be passed to the function\n\n '
raise NotImplementedError('`routine` is not implemented for model class {}'.format(self.__class__.__name__))<|docstring|>Derives losses and results.
If the model is to train something, this needs to be
overridden.
Args:
*args: Inputs to be passed to the function.
**kwargs: Hyperparameters to be passed to the function<|endoftext|>
|
dec3958a8e803f0e565a5d6875633b97c4cf0e823064dffe5bff05bd56fda59a
|
def visualize(self, *args, **kwargs):
'Visualizes.\n\n If the model is to visualize something, this needs to be\n overridden.\n\n Args:\n *args: Inputs to be passed to the function.\n **kwargs: Hyperparameters to be passed to the function\n\n '
pass
|
Visualizes.
If the model is to visualize something, this needs to be
overridden.
Args:
*args: Inputs to be passed to the function.
**kwargs: Hyperparameters to be passed to the function
|
cortex/plugins.py
|
visualize
|
bbradt/cortex
| 109 |
python
|
def visualize(self, *args, **kwargs):
'Visualizes.\n\n If the model is to visualize something, this needs to be\n overridden.\n\n Args:\n *args: Inputs to be passed to the function.\n **kwargs: Hyperparameters to be passed to the function\n\n '
pass
|
def visualize(self, *args, **kwargs):
'Visualizes.\n\n If the model is to visualize something, this needs to be\n overridden.\n\n Args:\n *args: Inputs to be passed to the function.\n **kwargs: Hyperparameters to be passed to the function\n\n '
pass<|docstring|>Visualizes.
If the model is to visualize something, this needs to be
overridden.
Args:
*args: Inputs to be passed to the function.
**kwargs: Hyperparameters to be passed to the function<|endoftext|>
|
769fdd2a273a26e03299820af2ce2323b1aeb7e2f45749b8fc0309b052f7e515
|
def train_step(self):
'Makes a training step.\n\n This can be overridden to change the behavior at each training step.\n\n '
self.data.next()
self.routine(auto_input=True)
self.optimizer_step()
|
Makes a training step.
This can be overridden to change the behavior at each training step.
|
cortex/plugins.py
|
train_step
|
bbradt/cortex
| 109 |
python
|
def train_step(self):
'Makes a training step.\n\n This can be overridden to change the behavior at each training step.\n\n '
self.data.next()
self.routine(auto_input=True)
self.optimizer_step()
|
def train_step(self):
'Makes a training step.\n\n This can be overridden to change the behavior at each training step.\n\n '
self.data.next()
self.routine(auto_input=True)
self.optimizer_step()<|docstring|>Makes a training step.
This can be overridden to change the behavior at each training step.<|endoftext|>
|
c78417090ba15c2ba16c2700298898e4db8668f7191a0aa776b3cf581176ba25
|
def eval_step(self):
'Makes an evaluation step.\n\n This can be overridden to change the behavior of each evaluation step.\n\n '
self.data.next()
self.routine(auto_input=True)
|
Makes an evaluation step.
This can be overridden to change the behavior of each evaluation step.
|
cortex/plugins.py
|
eval_step
|
bbradt/cortex
| 109 |
python
|
def eval_step(self):
'Makes an evaluation step.\n\n This can be overridden to change the behavior of each evaluation step.\n\n '
self.data.next()
self.routine(auto_input=True)
|
def eval_step(self):
'Makes an evaluation step.\n\n This can be overridden to change the behavior of each evaluation step.\n\n '
self.data.next()
self.routine(auto_input=True)<|docstring|>Makes an evaluation step.
This can be overridden to change the behavior of each evaluation step.<|endoftext|>
|
d091d40921ef9510ce3b2d523524980cd4d40a11aee5fe08316ffab22e7f3f51
|
def optimizer_step(self):
'Makes a step of the optimizers for which losses are defined.\n\n This can be overridden to change the behavior of the optimizer.\n\n '
keys = self.losses.keys()
for (i, k) in enumerate(keys):
loss = self.losses.pop(k)
loss.backward(retain_graph=(i < len(keys)))
key = self.nets._aliases.get(k, k)
optimizer = self._optimizers.get(key)
if (optimizer is not None):
optimizer.step()
|
Makes a step of the optimizers for which losses are defined.
This can be overridden to change the behavior of the optimizer.
|
cortex/plugins.py
|
optimizer_step
|
bbradt/cortex
| 109 |
python
|
def optimizer_step(self):
'Makes a step of the optimizers for which losses are defined.\n\n This can be overridden to change the behavior of the optimizer.\n\n '
keys = self.losses.keys()
for (i, k) in enumerate(keys):
loss = self.losses.pop(k)
loss.backward(retain_graph=(i < len(keys)))
key = self.nets._aliases.get(k, k)
optimizer = self._optimizers.get(key)
if (optimizer is not None):
optimizer.step()
|
def optimizer_step(self):
'Makes a step of the optimizers for which losses are defined.\n\n This can be overridden to change the behavior of the optimizer.\n\n '
keys = self.losses.keys()
for (i, k) in enumerate(keys):
loss = self.losses.pop(k)
loss.backward(retain_graph=(i < len(keys)))
key = self.nets._aliases.get(k, k)
optimizer = self._optimizers.get(key)
if (optimizer is not None):
optimizer.step()<|docstring|>Makes a step of the optimizers for which losses are defined.
This can be overridden to change the behavior of the optimizer.<|endoftext|>
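A minimal PyTorch sketch (assuming torch is installed) of the retain_graph pattern in `optimizer_step` above: every backward pass except the last keeps the shared graph alive. It is simplified relative to the plugin, using one network and one optimizer that steps after both losses have backpropagated.

```python
# Illustrative only: two losses share one forward graph; retain it until the last backward().
import torch

net = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(net.parameters(), lr=0.1)

out = net(torch.randn(8, 4))                  # forward pass shared by both losses
losses = {'a': out.pow(2).mean(), 'b': (out - 1).abs().mean()}

keys = list(losses)
for i, k in enumerate(keys):
    losses[k].backward(retain_graph=(i < len(keys) - 1))
opt.step()                                    # gradients from both losses have accumulated
```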
|
5c9f6737b214db005a1eb673dad754c6523435d0fecdf07efee64e42f96afbdc
|
def train_loop(self):
'The training loop.\n\n This can be overridden to change the behavior of the training loop.\n\n '
try:
while True:
self.train_step()
except StopIteration:
pass
|
The training loop.
This can be overridden to change the behavior of the training loop.
|
cortex/plugins.py
|
train_loop
|
bbradt/cortex
| 109 |
python
|
def train_loop(self):
'The training loop.\n\n This can be overridden to change the behavior of the training loop.\n\n '
try:
while True:
self.train_step()
except StopIteration:
pass
|
def train_loop(self):
'The training loop.\n\n This can be overridden to change the behavior of the training loop.\n\n '
try:
while True:
self.train_step()
except StopIteration:
pass<|docstring|>The training loop.
This can be overridden to change the behavior of the training loop.<|endoftext|>
|
d1f2f55c278c7e0b701e7cb0d1c6f3a6e8ff74985cbcf6fb201f621c7396382b
|
def eval_loop(self):
'The evaluation loop.\n\n This can be overridden to change the behavior of the evaluation loop.\n\n '
try:
while True:
self.eval_step()
except StopIteration:
pass
|
The evaluation loop.
This can be overridden to change the behavior of the evaluation loop.
|
cortex/plugins.py
|
eval_loop
|
bbradt/cortex
| 109 |
python
|
def eval_loop(self):
'The evaluation loop.\n\n This can be overridden to change the behavior of the evaluation loop.\n\n '
try:
while True:
self.eval_step()
except StopIteration:
pass
|
def eval_loop(self):
'The evaluation loop.\n\n This can be overridden to change the behavior of the evaluation loop.\n\n '
try:
while True:
self.eval_step()
except StopIteration:
pass<|docstring|>The evaluation loop.
This can be overridden to change the behavior of the evaluation loop.<|endoftext|>
|
e353fdb2b744c9d91d0e1ad8295d3efccd8fe76b789e66ddaca4e25f95fbdd6c
|
def get_dims(self, *queries):
'Gets dimensions of inputs.\n\n Args:\n *queries: Variables to get dimensions of .\n\n Returns:\n Dimensions of the variables.\n\n '
return self._data.get_dims(*queries)
|
Gets dimensions of inputs.
Args:
*queries: Variables to get dimensions of .
Returns:
Dimensions of the variables.
|
cortex/plugins.py
|
get_dims
|
bbradt/cortex
| 109 |
python
|
def get_dims(self, *queries):
'Gets dimensions of inputs.\n\n Args:\n *queries: Variables to get dimensions of .\n\n Returns:\n Dimensions of the variables.\n\n '
return self._data.get_dims(*queries)
|
def get_dims(self, *queries):
'Gets dimensions of inputs.\n\n Args:\n *queries: Variables to get dimensions of .\n\n Returns:\n Dimensions of the variables.\n\n '
return self._data.get_dims(*queries)<|docstring|>Gets dimensions of inputs.
Args:
*queries: Variables to get dimensions of .
Returns:
Dimensions of the variables.<|endoftext|>
|
0649223fce2f82057d3a02010a388f7d74fec0fe65a75d3daf60a979e2e87f72
|
def add_noise(self, key, dist=None, size=None, **kwargs):
'Adds a noise variable to the model.\n\n Args:\n key (str): Name of the noise variable.\n dist (str): Noise distribution.\n size (int): Size of the noise.\n **kwargs: keyword arguments for noise distribution.\n '
self._data.add_noise(key, dist=dist, size=size, **kwargs)
|
Adds a noise variable to the model.
Args:
key (str): Name of the noise variable.
dist (str): Noise distribution.
size (int): Size of the noise.
**kwargs: keyword arguments for noise distribution.
|
cortex/plugins.py
|
add_noise
|
bbradt/cortex
| 109 |
python
|
def add_noise(self, key, dist=None, size=None, **kwargs):
'Adds a noise variable to the model.\n\n Args:\n key (str): Name of the noise variable.\n dist (str): Noise distribution.\n size (int): Size of the noise.\n **kwargs: keyword arguments for noise distribution.\n '
self._data.add_noise(key, dist=dist, size=size, **kwargs)
|
def add_noise(self, key, dist=None, size=None, **kwargs):
'Adds a noise variable to the model.\n\n Args:\n key (str): Name of the noise variable.\n dist (str): Noise distribution.\n size (int): Size of the noise.\n **kwargs: keyword arguments for noise distribution.\n '
self._data.add_noise(key, dist=dist, size=size, **kwargs)<|docstring|>Adds a noise variable to the model.
Args:
key (str): Name of the noise variable.
dist (str): Noise distribution.
size (int): Size of the noise.
**kwargs: keyword arguments for noise distribution.<|endoftext|>
|
f33faf623f5f5d0d6b2a22d8ad8eafc1f41adfdf3c94e1aa0bb2052d70f468db
|
def add_image(self, *args, **kwargs):
'Adds image for visualization.\n\n Args:\n *args: TODO\n **kwargs: TODO\n\n '
self._viz.add_image(*args, **kwargs)
|
Adds image for visualization.
Args:
*args: TODO
**kwargs: TODO
|
cortex/plugins.py
|
add_image
|
bbradt/cortex
| 109 |
python
|
def add_image(self, *args, **kwargs):
'Adds image for visualization.\n\n Args:\n *args: TODO\n **kwargs: TODO\n\n '
self._viz.add_image(*args, **kwargs)
|
def add_image(self, *args, **kwargs):
'Adds image for visualization.\n\n Args:\n *args: TODO\n **kwargs: TODO\n\n '
self._viz.add_image(*args, **kwargs)<|docstring|>Adds image for visualization.
Args:
*args: TODO
**kwargs: TODO<|endoftext|>
|
7b0b2073a60dcbf6d93663c064128384c0d7c9b328de065fa850a17a72b74f09
|
def add_histogram(self, *args, **kwargs):
'Adds histogram for visualization.\n\n Args:\n *args: TODO\n **kwargs: TODO\n\n '
self._viz.add_histogram(*args, **kwargs)
|
Adds histogram for visualization.
Args:
*args: TODO
**kwargs: TODO
|
cortex/plugins.py
|
add_histogram
|
bbradt/cortex
| 109 |
python
|
def add_histogram(self, *args, **kwargs):
'Adds histogram for visualization.\n\n Args:\n *args: TODO\n **kwargs: TODO\n\n '
self._viz.add_histogram(*args, **kwargs)
|
def add_histogram(self, *args, **kwargs):
'Adds histogram for visualization.\n\n Args:\n *args: TODO\n **kwargs: TODO\n\n '
self._viz.add_histogram(*args, **kwargs)<|docstring|>Adds histogram for visualization.
Args:
*args: TODO
**kwargs: TODO<|endoftext|>
|
e957987302fe8e89ce62cc665c304baaf4316a1feb06e77c0aeae62f924330b2
|
def add_scatter(self, *args, **kwargs):
'Adds a scatter plot to visualization.\n\n Args:\n *args: TODO\n **kwargs: TODO\n\n '
self._viz.add_scatter(*args, **kwargs)
|
Adds a scatter plot to visualization.
Args:
*args: TODO
**kwargs: TODO
|
cortex/plugins.py
|
add_scatter
|
bbradt/cortex
| 109 |
python
|
def add_scatter(self, *args, **kwargs):
'Adds a scatter plot to visualization.\n\n Args:\n *args: TODO\n **kwargs: TODO\n\n '
self._viz.add_scatter(*args, **kwargs)
|
def add_scatter(self, *args, **kwargs):
'Adds a scatter plot to visualization.\n\n Args:\n *args: TODO\n **kwargs: TODO\n\n '
self._viz.add_scatter(*args, **kwargs)<|docstring|>Adds a scatter plot to visualization.
Args:
*args: TODO
**kwargs: TODO<|endoftext|>
|
52ad28545b2dae4ab53f97cbf5502123cba524be7dce06b0a7be60b017ea42ef
|
def get_readers():
'\n Get all available readers.\n\n Returns:\n list: available readers\n '
return all_readers
|
Get all available readers.
Returns:
list: available readers
|
muddery/worldeditor/utils/readers.py
|
get_readers
|
dongwudanci/muddery
| 127 |
python
|
def get_readers():
'\n Get all available readers.\n\n Returns:\n list: available readers\n '
return all_readers
|
def get_readers():
'\n Get all available readers.\n\n Returns:\n list: available readers\n '
return all_readers<|docstring|>Get all available readers.
Returns:
list: available readers<|endoftext|>
|
f76930759aaac83646a1432e165688d2ea026ecb478d13edbed097eb84a0f4ca
|
def get_reader(reader_type):
"\n Get a reader by reader's type.\n\n Args:\n type: (String) reader's type.\n\n Returns:\n reader\n "
return reader_dict.get(reader_type, None)
|
Get a reader by reader's type.
Args:
type: (String) reader's type.
Returns:
reader
|
muddery/worldeditor/utils/readers.py
|
get_reader
|
dongwudanci/muddery
| 127 |
python
|
def get_reader(reader_type):
"\n Get a reader by reader's type.\n\n Args:\n type: (String) reader's type.\n\n Returns:\n reader\n "
return reader_dict.get(reader_type, None)
|
def get_reader(reader_type):
"\n Get a reader by reader's type.\n\n Args:\n type: (String) reader's type.\n\n Returns:\n reader\n "
return reader_dict.get(reader_type, None)<|docstring|>Get a reader by reader's type.
Args:
type: (String) reader's type.
Returns:
reader<|endoftext|>
|
eadee11ca966fc605bbdcaac17b1ae048d471c3aa4a290bc3c48a1bb346d55e3
|
def __init__(self, filename=None):
"\n Args:\n filename: (String) data file's name.\n\n Returns:\n None\n "
self.filename = filename
|
Args:
filename: (String) data file's name.
Returns:
None
|
muddery/worldeditor/utils/readers.py
|
__init__
|
dongwudanci/muddery
| 127 |
python
|
def __init__(self, filename=None):
"\n Args:\n filename: (String) data file's name.\n\n Returns:\n None\n "
self.filename = filename
|
def __init__(self, filename=None):
"\n Args:\n filename: (String) data file's name.\n\n Returns:\n None\n "
self.filename = filename<|docstring|>Args:
filename: (String) data file's name.
Returns:
None<|endoftext|>
|
b48b0f27be90f26629afdc4a862f1e8146d71245271aad523da5dad38e1a6ac9
|
def readln(self):
'\n Read data line.\n\n Returns:\n list: data line\n '
raise StopIteration
|
Read data line.
Returns:
list: data line
|
muddery/worldeditor/utils/readers.py
|
readln
|
dongwudanci/muddery
| 127 |
python
|
def readln(self):
'\n Read data line.\n\n Returns:\n list: data line\n '
raise StopIteration
|
def readln(self):
'\n Read data line.\n\n Returns:\n list: data line\n '
raise StopIteration<|docstring|>Read data line.
Returns:
list: data line<|endoftext|>
|
6563f54c84af827fcb8f9ff6867e403299340948348ddcbd0a508b15fd599150
|
def __init__(self, filename=None):
"\n Args:\n filename: (String) data file's name.\n\n Returns:\n None\n "
super(CSVReader, self).__init__(filename)
self.reader = None
self.csvfile = None
if filename:
try:
self.csvfile = open(filename, 'r', encoding='utf-8')
head = self.csvfile.read(len(codecs.BOM_UTF8))
if (head != codecs.BOM_UTF8):
self.csvfile.seek(0)
except UnicodeDecodeError:
self.csvfile = open(filename, 'r')
self.reader = csv.reader(self.csvfile)
|
Args:
filename: (String) data file's name.
Returns:
None
|
muddery/worldeditor/utils/readers.py
|
__init__
|
dongwudanci/muddery
| 127 |
python
|
def __init__(self, filename=None):
"\n Args:\n filename: (String) data file's name.\n\n Returns:\n None\n "
super(CSVReader, self).__init__(filename)
self.reader = None
self.csvfile = None
if filename:
try:
self.csvfile = open(filename, 'r', encoding='utf-8')
head = self.csvfile.read(len(codecs.BOM_UTF8))
if (head != codecs.BOM_UTF8):
self.csvfile.seek(0)
except UnicodeDecodeError:
self.csvfile = open(filename, 'r')
self.reader = csv.reader(self.csvfile)
|
def __init__(self, filename=None):
"\n Args:\n filename: (String) data file's name.\n\n Returns:\n None\n "
super(CSVReader, self).__init__(filename)
self.reader = None
self.csvfile = None
if filename:
try:
self.csvfile = open(filename, 'r', encoding='utf-8')
head = self.csvfile.read(len(codecs.BOM_UTF8))
if (head != codecs.BOM_UTF8):
self.csvfile.seek(0)
except UnicodeDecodeError:
self.csvfile = open(filename, 'r')
self.reader = csv.reader(self.csvfile)<|docstring|>Args:
filename: (String) data file's name.
Returns:
None<|endoftext|>
|
392d08924a42b0252f9f4abf28ef8448ae4f71a087eb517e136e6c8c18d1704c
|
def readln(self):
'\n Read data line.\n\n Returns:\n list: data line\n '
if (not self.reader):
raise StopIteration
return next(self.reader)
|
Read data line.
Returns:
list: data line
|
muddery/worldeditor/utils/readers.py
|
readln
|
dongwudanci/muddery
| 127 |
python
|
def readln(self):
'\n Read data line.\n\n Returns:\n list: data line\n '
if (not self.reader):
raise StopIteration
return next(self.reader)
|
def readln(self):
'\n Read data line.\n\n Returns:\n list: data line\n '
if (not self.reader):
raise StopIteration
return next(self.reader)<|docstring|>Read data line.
Returns:
list: data line<|endoftext|>
|
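As an illustration of how the CSVReader defined in the two records above is typically driven (a sketch, not part of the dataset; 'data.csv' is a hypothetical path), a caller loops on readln until it raises StopIteration:

reader = CSVReader('data.csv')
rows = []
while True:
    try:
        rows.append(reader.readln())  # one list of cell values per call
    except StopIteration:
        break
print(len(rows), 'rows read')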
3beaa08f3a752aa83d5486563db536d5975a8fa0c83d26286173c5ebaf574f87
|
def __init__(self, filename=None):
"\n Args:\n filename: (String) data file's name.\n\n Returns:\n None\n "
super(XLSReader, self).__init__(filename)
if (not xlrd):
print('**********************************************************')
print('You need to install "xlrd" first to import xls/xlsx files!')
print('You can use "pip install xlrd" to install it! ')
print('**********************************************************')
return
self.sheet = None
self.row_pos = 0
if filename:
book = xlrd.open_workbook(filename)
self.sheet = book.sheet_by_index(0)
|
Args:
filename: (String) data file's name.
Returns:
None
|
muddery/worldeditor/utils/readers.py
|
__init__
|
dongwudanci/muddery
| 127 |
python
|
def __init__(self, filename=None):
"\n Args:\n filename: (String) data file's name.\n\n Returns:\n None\n "
super(XLSReader, self).__init__(filename)
if (not xlrd):
print('**********************************************************')
print('You need to install "xlrd" first to import xls/xlsx files!')
print('You can use "pip install xlrd" to install it! ')
print('**********************************************************')
return
self.sheet = None
self.row_pos = 0
if filename:
book = xlrd.open_workbook(filename)
self.sheet = book.sheet_by_index(0)
|
def __init__(self, filename=None):
"\n Args:\n filename: (String) data file's name.\n\n Returns:\n None\n "
super(XLSReader, self).__init__(filename)
if (not xlrd):
print('**********************************************************')
print('You need to install "xlrd" first to import xls/xlsx files!')
print('You can use "pip install xlrd" to install it! ')
print('**********************************************************')
return
self.sheet = None
self.row_pos = 0
if filename:
book = xlrd.open_workbook(filename)
self.sheet = book.sheet_by_index(0)<|docstring|>Args:
filename: (String) data file's name.
Returns:
None<|endoftext|>
|
0baa1c9b7b54b11d500e3cd6dd7612dd17b73daf05a11f6c6c209e42bef256e9
|
def readln(self):
'\n Read data line.\n\n Returns:\n list: data line\n '
if (not self.sheet):
raise StopIteration
if (self.row_pos >= self.sheet.nrows):
raise StopIteration
pos = self.row_pos
self.row_pos += 1
return self.sheet.row_values(pos)
|
Read data line.
Returns:
list: data line
|
muddery/worldeditor/utils/readers.py
|
readln
|
dongwudanci/muddery
| 127 |
python
|
def readln(self):
'\n Read data line.\n\n Returns:\n list: data line\n '
if (not self.sheet):
raise StopIteration
if (self.row_pos >= self.sheet.nrows):
raise StopIteration
pos = self.row_pos
self.row_pos += 1
return self.sheet.row_values(pos)
|
def readln(self):
'\n Read data line.\n\n Returns:\n list: data line\n '
if (not self.sheet):
raise StopIteration
if (self.row_pos >= self.sheet.nrows):
raise StopIteration
pos = self.row_pos
self.row_pos += 1
return self.sheet.row_values(pos)<|docstring|>Read data line.
Returns:
list: data line<|endoftext|>
|
2473d81ac0f72d2b6fc9d814cf96301b0542966e6689edc059f7e9b6b48aec30
|
def proto_factor_cosine(local_proto, global_proto):
'\n [C, D]: D is 64 or 4\n '
norm_local = torch.norm(local_proto, dim=(- 1), keepdim=False)
norm_global = torch.norm(global_proto, dim=(- 1), keepdim=False)
factor_refined = (torch.sum((local_proto * global_proto), dim=(- 1), keepdim=False) / ((norm_local * norm_global) + 1e-06))
return factor_refined
|
[C, D]: D is 64 or 4
|
loss_func.py
|
proto_factor_cosine
|
CityU-AIM-Group/PRR-Imbalance
| 0 |
python
|
def proto_factor_cosine(local_proto, global_proto):
'\n \n '
norm_local = torch.norm(local_proto, dim=(- 1), keepdim=False)
norm_global = torch.norm(global_proto, dim=(- 1), keepdim=False)
factor_refined = (torch.sum((local_proto * global_proto), dim=(- 1), keepdim=False) / ((norm_local * norm_global) + 1e-06))
return factor_refined
|
def proto_factor_cosine(local_proto, global_proto):
'\n \n '
norm_local = torch.norm(local_proto, dim=(- 1), keepdim=False)
norm_global = torch.norm(global_proto, dim=(- 1), keepdim=False)
factor_refined = (torch.sum((local_proto * global_proto), dim=(- 1), keepdim=False) / ((norm_local * norm_global) + 1e-06))
return factor_refined<|docstring|>[C, D]: D is 64 or 4<|endoftext|>
|
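A small worked check (an illustration, assuming the proto_factor_cosine function from the record above is in scope): for prototype matrices of shape [C, D], it returns one cosine similarity per class and should agree with torch.nn.functional.cosine_similarity up to the 1e-06 epsilon term.

import torch
import torch.nn.functional as F

local_proto = torch.randn(3, 4)    # hypothetical C=3 classes, D=4 features
global_proto = torch.randn(3, 4)

factor = proto_factor_cosine(local_proto, global_proto)         # shape [3]
reference = F.cosine_similarity(local_proto, global_proto, dim=-1)
print(torch.allclose(factor, reference, atol=1e-4))             # expected: True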
05f952f5d0c224b7d19662fd7b7e1726a76a6eff6ca5ab2a1ee568f9a49f577d
|
def proto_factor_cosine(self, source_proto, target_proto):
'\n [C, D]: D is 64 or 4\n '
norm_source = torch.norm(source_proto, dim=(- 1), keepdim=False)
norm_target = torch.norm(target_proto.detach(), dim=(- 1), keepdim=False)
factor_refined = (torch.sum((source_proto * target_proto.detach()), dim=(- 1), keepdim=False) / ((norm_source * norm_target) + self.eps))
return factor_refined
|
[C, D]: D is 64 or 4
|
loss_func.py
|
proto_factor_cosine
|
CityU-AIM-Group/PRR-Imbalance
| 0 |
python
|
def proto_factor_cosine(self, source_proto, target_proto):
'\n \n '
norm_source = torch.norm(source_proto, dim=(- 1), keepdim=False)
norm_target = torch.norm(target_proto.detach(), dim=(- 1), keepdim=False)
factor_refined = (torch.sum((source_proto * target_proto.detach()), dim=(- 1), keepdim=False) / ((norm_source * norm_target) + self.eps))
return factor_refined
|
def proto_factor_cosine(self, source_proto, target_proto):
'\n \n '
norm_source = torch.norm(source_proto, dim=(- 1), keepdim=False)
norm_target = torch.norm(target_proto.detach(), dim=(- 1), keepdim=False)
factor_refined = (torch.sum((source_proto * target_proto.detach()), dim=(- 1), keepdim=False) / ((norm_source * norm_target) + self.eps))
return factor_refined<|docstring|>[C, D]: D is 64 or 4<|endoftext|>
|
0399ac41b9823709572250495c12158fdcd5411b13b72fa7fd950961dc4e0c19
|
def GradientDescent(f, x0, delta=1e-05):
'\n GradientDescent( f , x0 , delta=1e-5 ).\n This method finds global minimum using gradient descent.\n\n Parameters:\n f (function): the function to minimize\n x0 (vector): initial value for gradient descent\n delta (float): tolerance\n\n Returns:\n x: the argument that minimizes the function\n\n '
x = x0
d = Gradient(f)(x)
while (npla.norm(d) > delta):
phi = (lambda alpha: f((x - (alpha * d))))
alpha = op.newton(phi, 0)
x = (x - (d * alpha))
d = Gradient(f)(x)
return x
|
GradientDescent( f , x0 , delta=1e-5 ).
This method finds global minimum using gradient descent.
Parameters:
f (function): the function to minimize
x0 (vector): initial value for gradient descent
delta (float): tolerance
Returns:
x: the argument that minimizes the function
|
NumAn_Op/multi_dim_min.py
|
GradientDescent
|
FILALIHicham/Numerical_Analysis_Optimization_Package
| 1 |
python
|
def GradientDescent(f, x0, delta=1e-05):
'\n GradientDescent( f , x0 , delta=1e-5 ).\n This method finds global minimum using gradient descent.\n\n Parameters:\n f (function): the function to minimize\n x0 (vector): initial value for gradient descent\n delta (float): tolerance\n\n Returns:\n x: the argument that minimizes the function\n\n '
x = x0
d = Gradient(f)(x)
while (npla.norm(d) > delta):
phi = (lambda alpha: f((x - (alpha * d))))
alpha = op.newton(phi, 0)
x = (x - (d * alpha))
d = Gradient(f)(x)
return x
|
def GradientDescent(f, x0, delta=1e-05):
'\n GradientDescent( f , x0 , delta=1e-5 ).\n This method finds global minimum using gradient descent.\n\n Parameters:\n f (function): the function to minimize\n x0 (vector): initial value for gradient descent\n delta (float): tolerance\n\n Returns:\n x: the argument that minimizes the function\n\n '
x = x0
d = Gradient(f)(x)
while (npla.norm(d) > delta):
phi = (lambda alpha: f((x - (alpha * d))))
alpha = op.newton(phi, 0)
x = (x - (d * alpha))
d = Gradient(f)(x)
return x<|docstring|>GradientDescent( f , x0 , delta=1e-5 ).
This method finds global minimum using gradient descent.
Parameters:
f (function): the function to minimize
x0 (vector): initial value for gradient descent
delta (float): tolerance
Returns:
x: the argument that minimizes the function<|endoftext|>
|
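A minimal usage sketch, assuming GradientDescent and the Gradient helper it calls are importable from the record's module (NumAn_Op.multi_dim_min); the test function below is hypothetical and its unique minimum is at (1, -2):

import numpy as np

f = lambda x: (x[0] - 1.0) ** 2 + (x[1] + 2.0) ** 2   # convex, minimum at (1, -2)
x_min = GradientDescent(f, np.array([5.0, 5.0]))
print(np.round(x_min, 3))   # should approach [ 1. -2.]

Note that the step size comes from a root find on phi(alpha) = f(x - alpha*d), so convergence depends on that line search succeeding for the chosen function.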
3ad66e8cfaf2b6236ebc6a57843173fa06aa923c47d0d43759f82ea981983b1d
|
def ConjugateGradient(f, x, Q, b):
'\n ConjugateGradient( f , x , Q , b ).\n This method finds global minimum using conjugate gradient.\n\n Parameters:\n f (function): the function to minimize\n x (vector): initial value for conjugategradient\n Q (array): positive definite nxn symmetric matrix\n b (vector)\n\n Returns:\n x: the argument that minimizes the function\n \n '
n = x.shape[0]
p = (- (np.dot(Q, x) - b))
for i in range(n):
alpha = ((- np.dot((np.dot(Q, x) - b), p)) / np.dot(np.dot(p, Q), p))
x = (x + (alpha * p))
grad = Gradient(f)(x)
beta = (np.dot(np.dot(grad, Q), p) / np.dot(np.dot(p, Q), p))
p = ((- grad) + (beta * p))
return x
|
ConjugateGradient( f , x , Q , b ).
This method finds global minimum using conjugate gradient.
Parameters:
f (function): the function to minimize
x (vector): initial value for conjugategradient
Q (array): positive definite nxn symmetric matrix
b (vector)
Returns:
x: the argument that minimizes the function
|
NumAn_Op/multi_dim_min.py
|
ConjugateGradient
|
FILALIHicham/Numerical_Analysis_Optimization_Package
| 1 |
python
|
def ConjugateGradient(f, x, Q, b):
'\n ConjugateGradient( f , x , Q , b ).\n This method finds global minimum using conjugate gradient.\n\n Parameters:\n f (function): the function to minimize\n x (vector): initial value for conjugategradient\n Q (array): positive definite nxn symmetric matrix\n b (vector)\n\n Returns:\n x: the argument that minimizes the function\n \n '
n = x.shape[0]
p = (- (np.dot(Q, x) - b))
for i in range(n):
alpha = ((- np.dot((np.dot(Q, x) - b), p)) / np.dot(np.dot(p, Q), p))
x = (x + (alpha * p))
grad = Gradient(f)(x)
beta = (np.dot(np.dot(grad, Q), p) / np.dot(np.dot(p, Q), p))
p = ((- grad) + (beta * p))
return x
|
def ConjugateGradient(f, x, Q, b):
'\n ConjugateGradient( f , x , Q , b ).\n This method finds global minimum using conjugate gradient.\n\n Parameters:\n f (function): the function to minimize\n x (vector): initial value for conjugategradient\n Q (array): positive definite nxn symmetric matrix\n b (vector)\n\n Returns:\n x: the argument that minimizes the function\n \n '
n = x.shape[0]
p = (- (np.dot(Q, x) - b))
for i in range(n):
alpha = ((- np.dot((np.dot(Q, x) - b), p)) / np.dot(np.dot(p, Q), p))
x = (x + (alpha * p))
grad = Gradient(f)(x)
beta = (np.dot(np.dot(grad, Q), p) / np.dot(np.dot(p, Q), p))
p = ((- grad) + (beta * p))
return x<|docstring|>ConjugateGradient( f , x , Q , b ).
This method finds global minimum using conjugate gradient.
Parameters:
f (function): the function to minimize
x (vector): initial value for conjugategradient
Q (array): positive definite nxn symmetric matrix
b (vector)
Returns:
x: the argument that minimizes the function<|endoftext|>
|
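A worked sanity check (a sketch under the assumption that ConjugateGradient is imported from the record's module): for f(x) = 0.5 x^T Q x - b^T x with Q symmetric positive definite, the minimizer solves Q x = b, so the returned point should satisfy that system up to the accuracy of the package's numerical gradient.

import numpy as np

Q = np.array([[4.0, 1.0], [1.0, 3.0]])   # hypothetical SPD matrix
b = np.array([1.0, 2.0])
f = lambda x: 0.5 * x @ Q @ x - b @ x

x_min = ConjugateGradient(f, np.zeros(2), Q, b)
print(np.allclose(Q @ x_min, b, atol=1e-4))   # expected: True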
d324416d9eedbb66bdbeffd45222d261ffb759867b255417db18181619089c30
|
def adagrad(f, df, bounds, n_iter=100, step_size=0.1):
'\n adagrad( f , df , bounds , n_iter = 100 , step_size = .1 ).\n This method finds global minimum using the stochastic gradient descent variant: AdaGrad.\n\n Parameters:\n f (function): the function to minimize\n df (function): the first derivative of the function to minimize\n bounds (vector): uncertainty interval\n n_iter (int): number of iteration\n step_size (vector): step size\n\n Returns:\n solution: small interval that encloses the minimum value\n \n '
solution = (bounds[(:, 0)] + (rand(len(bounds)) * (bounds[(:, 1)] - bounds[(:, 0)])))
sq_grad_sums = [0.0 for _ in range(bounds.shape[0])]
for it in range(n_iter):
gradient = df(solution[0], solution[1])
for i in range(gradient.shape[0]):
sq_grad_sums[i] += (gradient[i] ** 2.0)
new_solution = list()
for i in range(solution.shape[0]):
alpha = (step_size / (1e-08 + sqrt(sq_grad_sums[i])))
value = (solution[i] - (alpha * gradient[i]))
new_solution.append(value)
solution = asarray(new_solution)
return solution
|
adagrad( f , df , bounds , n_iter = 100 , step_size = .1 ).
This method finds global minimum using the stochastic gradient descent variant: AdaGrad.
Parameters:
f (function): the function to minimize
df (function): the first derivative of the function to minimize
bounds (vector): uncertainty interval
n_iter (int): number of iteration
step_size (vector): step size
Returns:
solution: small interval that encloses the minimum value
|
NumAn_Op/multi_dim_min.py
|
adagrad
|
FILALIHicham/Numerical_Analysis_Optimization_Package
| 1 |
python
|
def adagrad(f, df, bounds, n_iter=100, step_size=0.1):
'\n adagrad( f , df , bounds , n_iter = 100 , step_size = .1 ).\n This method finds global minimum using the stochastic gradient descent variant: AdaGrad.\n\n Parameters:\n f (function): the function to minimize\n df (function): the first derivative of the function to minimize\n bounds (vector): uncertainty interval\n n_iter (int): number of iteration\n step_size (vector): step size\n\n Returns:\n solution: small interval that encloses the minimum value\n \n '
solution = (bounds[(:, 0)] + (rand(len(bounds)) * (bounds[(:, 1)] - bounds[(:, 0)])))
sq_grad_sums = [0.0 for _ in range(bounds.shape[0])]
for it in range(n_iter):
gradient = df(solution[0], solution[1])
for i in range(gradient.shape[0]):
sq_grad_sums[i] += (gradient[i] ** 2.0)
new_solution = list()
for i in range(solution.shape[0]):
alpha = (step_size / (1e-08 + sqrt(sq_grad_sums[i])))
value = (solution[i] - (alpha * gradient[i]))
new_solution.append(value)
solution = asarray(new_solution)
return solution
|
def adagrad(f, df, bounds, n_iter=100, step_size=0.1):
'\n adagrad( f , df , bounds , n_iter = 100 , step_size = .1 ).\n This method finds global minimum using the stochastic gradient descent variant: AdaGrad.\n\n Parameters:\n f (function): the function to minimize\n df (function): the first derivative of the function to minimize\n bounds (vector): uncertainty interval\n n_iter (int): number of iteration\n step_size (vector): step size\n\n Returns:\n solution: small interval that encloses the minimum value\n \n '
solution = (bounds[(:, 0)] + (rand(len(bounds)) * (bounds[(:, 1)] - bounds[(:, 0)])))
sq_grad_sums = [0.0 for _ in range(bounds.shape[0])]
for it in range(n_iter):
gradient = df(solution[0], solution[1])
for i in range(gradient.shape[0]):
sq_grad_sums[i] += (gradient[i] ** 2.0)
new_solution = list()
for i in range(solution.shape[0]):
alpha = (step_size / (1e-08 + sqrt(sq_grad_sums[i])))
value = (solution[i] - (alpha * gradient[i]))
new_solution.append(value)
solution = asarray(new_solution)
return solution<|docstring|>adagrad( f , df , bounds , n_iter = 100 , step_size = .1 ).
This method finds global minimum using the stochastic gradient descent variant: AdaGrad.
Parameters:
f (function): the function to minimize
df (function): the first derivative of the function to minimize
bounds (vector): uncertainty interval
n_iter (int): number of iteration
step_size (vector): step size
Returns:
solution: small interval that encloses the minimum value<|endoftext|>
|
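A usage sketch (assuming adagrad is imported from the record's module): the gradient callback receives the two coordinates separately, matching the df(solution[0], solution[1]) call in the body, and bounds is an array of per-dimension [low, high] pairs. The exact result varies because the start point is random.

from numpy import asarray

objective = lambda x, y: x ** 2.0 + y ** 2.0              # hypothetical test function
derivative = lambda x, y: asarray([2.0 * x, 2.0 * y])     # its analytic gradient

bounds = asarray([[-1.0, 1.0], [-1.0, 1.0]])
best = adagrad(objective, derivative, bounds, n_iter=200, step_size=0.1)
print(best)   # both coordinates should end up near 0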
6d6482d74a070c6a0d7064ed1b94d2bb3f3def0287aca3283b7ea27c2e6ed66a
|
def Newton(f, x0, delta=1e-05):
'\n Newton( f, x0 , delta = 1e-5 ).\n This method finds global minimum using Newton method.\n\n Parameters:\n f (function): the function to minimize\n x0 (vector): initial value for Newton method\n delta (float): tolerance\n\n Returns:\n x: the argument that minimizes the function\n\n '
(x, n) = (x0, len(x0))
I = np.identity(n)
d = (- np.dot(npla.inv(Hessian(f)(x)), Gradient(f)(x)))
while (npla.norm(d) > delta):
x = (x + d)
if np.all((np.linalg.eigvals(Hessian(f)(x)) > 0)):
d = (- np.dot(npla.inv(Hessian(f)(x)), Gradient(f)(x)))
else:
d = (- np.dot(npla.inv(((delta * I) + Hessian(f)(x))), Gradient(f)(x)))
return x
|
Newton( f, x0 , delta = 1e-5 ).
This method finds global minimum using Newton method.
Parameters:
f (function): the function to minimize
x0 (vector): initial value for Newton method
delta (float): tolerance
Returns:
x: the argument that minimizes the function
|
NumAn_Op/multi_dim_min.py
|
Newton
|
FILALIHicham/Numerical_Analysis_Optimization_Package
| 1 |
python
|
def Newton(f, x0, delta=1e-05):
'\n Newton( f, x0 , delta = 1e-5 ).\n This method finds global minimum using Newton method.\n\n Parameters:\n f (function): the function to minimize\n x0 (vector): initial value for Newton method\n delta (float): tolerance\n\n Returns:\n x: the argument that minimizes the function\n\n '
(x, n) = (x0, len(x0))
I = np.identity(n)
d = (- np.dot(npla.inv(Hessian(f)(x)), Gradient(f)(x)))
while (npla.norm(d) > delta):
x = (x + d)
if np.all((np.linalg.eigvals(Hessian(f)(x)) > 0)):
d = (- np.dot(npla.inv(Hessian(f)(x)), Gradient(f)(x)))
else:
d = (- np.dot(npla.inv(((delta * I) + Hessian(f)(x))), Gradient(f)(x)))
return x
|
def Newton(f, x0, delta=1e-05):
'\n Newton( f, x0 , delta = 1e-5 ).\n This method finds global minimum using Newton method.\n\n Parameters:\n f (function): the function to minimize\n x0 (vector): initial value for Newton method\n delta (float): tolerance\n\n Returns:\n x: the argument that minimizes the function\n\n '
(x, n) = (x0, len(x0))
I = np.identity(n)
d = (- np.dot(npla.inv(Hessian(f)(x)), Gradient(f)(x)))
while (npla.norm(d) > delta):
x = (x + d)
if np.all((np.linalg.eigvals(Hessian(f)(x)) > 0)):
d = (- np.dot(npla.inv(Hessian(f)(x)), Gradient(f)(x)))
else:
d = (- np.dot(npla.inv(((delta * I) + Hessian(f)(x))), Gradient(f)(x)))
return x<|docstring|>Newton( f, x0 , delta = 1e-5 ).
This method finds global minimum using Newton method.
Parameters:
f (function): the function to minimize
x0 (vector): initial value for Newton method
delta (float): tolerance
Returns:
x: the argument that minimizes the function<|endoftext|>
|
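A minimal usage sketch, again assuming Newton plus its Gradient and Hessian helpers are importable from the record's module; for a strictly convex quadratic the method should land on the minimum (here the hypothetical test function has its minimum at the origin) in essentially one step.

import numpy as np

f = lambda x: x[0] ** 2 + 2.0 * x[1] ** 2 + x[0] * x[1]   # positive definite Hessian
x_min = Newton(f, np.array([3.0, -4.0]))
print(np.round(x_min, 3))   # should be close to [0. 0.]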
297fc88a15ad1983feb213508a2db35656b0d5adbfb91b867f913f43d00fb8c2
|
def armijo(phi, alpha0, e=0.01, beta=2):
'\n armijo( phi , alpha0 , eps = .01 , beta = 2 ).\n This method calculate the optimal step size for descent methods.\n\n Parameters:\n phi (function): the function to minimize\n alpha0 (vector): initial step size of our optimization algorithm\n e (float): tolerance (a small value)\n beta (float): scaling factor\n\n Returns:\n alpha: improved step size\n\n '
if (alpha0 == 0):
alpha = 0.1
else:
alpha = alpha0
if test(1, phi, e, alpha):
while (not test(2, phi, e, (beta * alpha))):
alpha = (beta * alpha)
else:
while (not test(1, phi, e, alpha)):
alpha = (alpha / beta)
return alpha
|
armijo( phi , alpha0 , eps = .01 , beta = 2 ).
This method calculate the optimal step size for descent methods.
Parameters:
phi (function): the function to minimize
alpha0 (vector): initial step size of our optimization algorithm
e (float): tolerance (a small value)
beta (float): scaling factor
Returns:
alpha: improved step size
|
NumAn_Op/multi_dim_min.py
|
armijo
|
FILALIHicham/Numerical_Analysis_Optimization_Package
| 1 |
python
|
def armijo(phi, alpha0, e=0.01, beta=2):
'\n armijo( phi , alpha0 , eps = .01 , beta = 2 ).\n This method calculate the optimal step size for descent methods.\n\n Parameters:\n phi (function): the function to minimize\n alpha0 (vector): initial step size of our optimization algorithm\n e (float): tolerance (a small value)\n beta (float): scaling factor\n\n Returns:\n alpha: improved step size\n\n '
if (alpha0 == 0):
alpha = 0.1
else:
alpha = alpha0
if test(1, phi, e, alpha):
while (not test(2, phi, e, (beta * alpha))):
alpha = (beta * alpha)
else:
while (not test(1, phi, e, alpha)):
alpha = (alpha / beta)
return alpha
|
def armijo(phi, alpha0, e=0.01, beta=2):
'\n armijo( phi , alpha0 , eps = .01 , beta = 2 ).\n This method calculate the optimal step size for descent methods.\n\n Parameters:\n phi (function): the function to minimize\n alpha0 (vector): initial step size of our optimization algorithm\n e (float): tolerance (a small value)\n beta (float): scaling factor\n\n Returns:\n alpha: improved step size\n\n '
if (alpha0 == 0):
alpha = 0.1
else:
alpha = alpha0
if test(1, phi, e, alpha):
while (not test(2, phi, e, (beta * alpha))):
alpha = (beta * alpha)
else:
while (not test(1, phi, e, alpha)):
alpha = (alpha / beta)
return alpha<|docstring|>armijo( phi , alpha0 , eps = .01 , beta = 2 ).
This method calculate the optimal step size for descent methods.
Parameters:
phi (function): the function to minimize
alpha0 (vector): initial step size of our optimization algorithm
e (float): tolerance (a small value)
beta (float): scaling factor
Returns:
alpha: improved step size<|endoftext|>
|
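The armijo routine above delegates the acceptance check to a test helper defined elsewhere in the module. As a rough, self-contained illustration of the rule such a helper normally encodes (an assumption about intent, not the package's actual code), a standard backtracking variant accepts alpha once phi(alpha) <= phi(0) + e * alpha * phi'(0):

def armijo_backtracking(phi, dphi0, alpha=1.0, e=0.01, beta=2.0, max_iter=50):
    # dphi0 is the directional derivative of phi at 0 (negative for a descent direction)
    for _ in range(max_iter):
        if phi(alpha) <= phi(0.0) + e * alpha * dphi0:
            return alpha          # sufficient decrease reached
        alpha /= beta             # otherwise shrink the step
    return alpha

print(armijo_backtracking(lambda a: (a - 1.0) ** 2, dphi0=-2.0))   # accepts alpha = 1.0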
b8b05d4ab2df6d927401e661dc2a1969e7d949bfcece687dd202c15abed20412
|
def quasiNewton(f, X0, delta=0.01):
"\n def quasiNewton(f, X0, delta = .01)\n This method minimize a multi-dimensional function\n \n Parameters:\n f (function): multi-dimensional function to minimize\n x0 (vector): intial point's coordinates\n delta (float): small quantity to check convergence\n\n Returns:\n X0: small interval that encloses the minimum value.\n\n "
X0 = np.array([X0]).T
global X
X = X0
grd1 = np.array([Gradient(f)(X0)]).T
H = np.eye(len(X))
while (np.linalg.norm(grd1) > delta):
d = ((- H) @ grd1)
phi = (lambda alpha: f((X - (alpha * grd1))))
alphak = armijo(phi, 1)
X = (X0 + (alphak * d))
grd0 = grd1
grd1 = np.array([Gradient(f)(X)]).T
H = DFP(H, alphak, d, grd0, grd1)
X0 = X
return X0
|
def quasiNewton(f, X0, delta = .01)
This method minimize a multi-dimensional function
Parameters:
f (function): multi-dimensional function to minimize
x0 (vector): intial point's coordinates
delta (float): small quantity to check convergence
Returns:
X0: small interval that encloses the minimum value.
|
NumAn_Op/multi_dim_min.py
|
quasiNewton
|
FILALIHicham/Numerical_Analysis_Optimization_Package
| 1 |
python
|
def quasiNewton(f, X0, delta=0.01):
"\n def quasiNewton(f, X0, delta = .01)\n This method minimize a multi-dimensional function\n \n Parameters:\n f (function): multi-dimensional function to minimize\n x0 (vector): intial point's coordinates\n delta (float): small quantity to check convergence\n\n Returns:\n X0: small interval that encloses the minimum value.\n\n "
X0 = np.array([X0]).T
global X
X = X0
grd1 = np.array([Gradient(f)(X0)]).T
H = np.eye(len(X))
while (np.linalg.norm(grd1) > delta):
d = ((- H) @ grd1)
phi = (lambda alpha: f((X - (alpha * grd1))))
alphak = armijo(phi, 1)
X = (X0 + (alphak * d))
grd0 = grd1
grd1 = np.array([Gradient(f)(X)]).T
H = DFP(H, alphak, d, grd0, grd1)
X0 = X
return X0
|
def quasiNewton(f, X0, delta=0.01):
"\n def quasiNewton(f, X0, delta = .01)\n This method minimize a multi-dimensional function\n \n Parameters:\n f (function): multi-dimensional function to minimize\n x0 (vector): intial point's coordinates\n delta (float): small quantity to check convergence\n\n Returns:\n X0: small interval that encloses the minimum value.\n\n "
X0 = np.array([X0]).T
global X
X = X0
grd1 = np.array([Gradient(f)(X0)]).T
H = np.eye(len(X))
while (np.linalg.norm(grd1) > delta):
d = ((- H) @ grd1)
phi = (lambda alpha: f((X - (alpha * grd1))))
alphak = armijo(phi, 1)
X = (X0 + (alphak * d))
grd0 = grd1
grd1 = np.array([Gradient(f)(X)]).T
H = DFP(H, alphak, d, grd0, grd1)
X0 = X
return X0<|docstring|>def quasiNewton(f, X0, delta = .01)
This method minimize a multi-dimensional function
Parameters:
f (function): multi-dimensional function to minimize
x0 (vector): intial point's coordinates
delta (float): small quantity to check convergence
Returns:
X0: small interval that encloses the minimum value.<|endoftext|>
|
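quasiNewton above calls a DFP helper that is not part of this record. As a hedged sketch of the classic Davidon-Fletcher-Powell inverse-Hessian update it presumably implements (the argument order mirrors the call DFP(H, alphak, d, grd0, grd1); this is an assumption, not the package's code):

import numpy as np

def dfp_update(H, alpha, d, grad_old, grad_new):
    s = alpha * d                   # step taken, as a column vector
    y = grad_new - grad_old         # change in gradient, as a column vector
    Hy = H @ y
    # H+ = H + s s^T / (s^T y) - (H y)(H y)^T / (y^T H y)
    return H + (s @ s.T) / (s.T @ y).item() - (Hy @ Hy.T) / (y.T @ Hy).item()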
7680f487b6988083a2037f6d40030464f8ba99cbe29ff9f7bf197d2f16e02f49
|
def VirtualMachineQuickStats(vim, *args, **kwargs):
'A set of statistics that are typically updated with near real-time regularity.\n This data object type does not support notification, for scalability reasons.\n Therefore, changes in QuickStats do not generate property collector updates. To\n monitor statistics values, use the statistics and alarms modules instead.'
obj = vim.client.factory.create('{urn:vim25}VirtualMachineQuickStats')
if ((len(args) + len(kwargs)) < 1):
raise IndexError(('Expected at least 2 arguments got: %d' % len(args)))
required = ['guestHeartbeatStatus']
optional = ['balloonedMemory', 'compressedMemory', 'consumedOverheadMemory', 'distributedCpuEntitlement', 'distributedMemoryEntitlement', 'ftLatencyStatus', 'ftLogBandwidth', 'ftSecondaryLatency', 'guestMemoryUsage', 'hostMemoryUsage', 'overallCpuDemand', 'overallCpuUsage', 'privateMemory', 'sharedMemory', 'ssdSwappedMemory', 'staticCpuEntitlement', 'staticMemoryEntitlement', 'swappedMemory', 'uptimeSeconds', 'dynamicProperty', 'dynamicType']
for (name, arg) in zip((required + optional), args):
setattr(obj, name, arg)
for (name, value) in kwargs.items():
if (name in (required + optional)):
setattr(obj, name, value)
else:
raise InvalidArgumentError(('Invalid argument: %s. Expected one of %s' % (name, ', '.join((required + optional)))))
return obj
|
A set of statistics that are typically updated with near real-time regularity.
This data object type does not support notification, for scalability reasons.
Therefore, changes in QuickStats do not generate property collector updates. To
monitor statistics values, use the statistics and alarms modules instead.
|
pyvisdk/do/virtual_machine_quick_stats.py
|
VirtualMachineQuickStats
|
Infinidat/pyvisdk
| 0 |
python
|
def VirtualMachineQuickStats(vim, *args, **kwargs):
'A set of statistics that are typically updated with near real-time regularity.\n This data object type does not support notification, for scalability reasons.\n Therefore, changes in QuickStats do not generate property collector updates. To\n monitor statistics values, use the statistics and alarms modules instead.'
obj = vim.client.factory.create('{urn:vim25}VirtualMachineQuickStats')
if ((len(args) + len(kwargs)) < 1):
raise IndexError(('Expected at least 2 arguments got: %d' % len(args)))
required = ['guestHeartbeatStatus']
optional = ['balloonedMemory', 'compressedMemory', 'consumedOverheadMemory', 'distributedCpuEntitlement', 'distributedMemoryEntitlement', 'ftLatencyStatus', 'ftLogBandwidth', 'ftSecondaryLatency', 'guestMemoryUsage', 'hostMemoryUsage', 'overallCpuDemand', 'overallCpuUsage', 'privateMemory', 'sharedMemory', 'ssdSwappedMemory', 'staticCpuEntitlement', 'staticMemoryEntitlement', 'swappedMemory', 'uptimeSeconds', 'dynamicProperty', 'dynamicType']
for (name, arg) in zip((required + optional), args):
setattr(obj, name, arg)
for (name, value) in kwargs.items():
if (name in (required + optional)):
setattr(obj, name, value)
else:
raise InvalidArgumentError(('Invalid argument: %s. Expected one of %s' % (name, ', '.join((required + optional)))))
return obj
|
def VirtualMachineQuickStats(vim, *args, **kwargs):
'A set of statistics that are typically updated with near real-time regularity.\n This data object type does not support notification, for scalability reasons.\n Therefore, changes in QuickStats do not generate property collector updates. To\n monitor statistics values, use the statistics and alarms modules instead.'
obj = vim.client.factory.create('{urn:vim25}VirtualMachineQuickStats')
if ((len(args) + len(kwargs)) < 1):
raise IndexError(('Expected at least 2 arguments got: %d' % len(args)))
required = ['guestHeartbeatStatus']
optional = ['balloonedMemory', 'compressedMemory', 'consumedOverheadMemory', 'distributedCpuEntitlement', 'distributedMemoryEntitlement', 'ftLatencyStatus', 'ftLogBandwidth', 'ftSecondaryLatency', 'guestMemoryUsage', 'hostMemoryUsage', 'overallCpuDemand', 'overallCpuUsage', 'privateMemory', 'sharedMemory', 'ssdSwappedMemory', 'staticCpuEntitlement', 'staticMemoryEntitlement', 'swappedMemory', 'uptimeSeconds', 'dynamicProperty', 'dynamicType']
for (name, arg) in zip((required + optional), args):
setattr(obj, name, arg)
for (name, value) in kwargs.items():
if (name in (required + optional)):
setattr(obj, name, value)
else:
raise InvalidArgumentError(('Invalid argument: %s. Expected one of %s' % (name, ', '.join((required + optional)))))
return obj<|docstring|>A set of statistics that are typically updated with near real-time regularity.
This data object type does not support notification, for scalability reasons.
Therefore, changes in QuickStats do not generate property collector updates. To
monitor statistics values, use the statistics and alarms modules instead.<|endoftext|>
|
9410824fd07a28f205b042633c20c2624c51eac042fb50fa1145251e8812592e
|
def get_pipeline(config, min_steps, max_steps, eval_ratio):
'Returns the Pipeline instance which creates the RNN dataset.\n\n Args:\n config: An EventSequenceRnnConfig.\n min_steps: Minimum number of steps for an extracted sequence.\n max_steps: Maximum number of steps for an extracted sequence.\n eval_ratio: Fraction of input to set aside for evaluation set.\n\n Returns:\n A pipeline.Pipeline instance.\n '
transposition_range = list(range((- 4), 5))
partitioner = pipelines_common.RandomPartition(music_pb2.NoteSequence, ['eval_pianoroll_tracks', 'training_pianoroll_tracks'], [eval_ratio])
dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}
for mode in ['eval', 'training']:
time_change_splitter = note_sequence_pipelines.TimeChangeSplitter(name=('TimeChangeSplitter_' + mode))
quantizer = note_sequence_pipelines.Quantizer(steps_per_quarter=config.steps_per_quarter, name=('Quantizer_' + mode))
transposition_pipeline = note_sequence_pipelines.TranspositionPipeline(transposition_range, name=('TranspositionPipeline_' + mode))
pianoroll_extractor = PianorollSequenceExtractor(min_steps=min_steps, max_steps=max_steps, name=('PianorollExtractor_' + mode))
encoder_pipeline = event_sequence_pipeline.EncoderPipeline(PianorollSequence, config.encoder_decoder, name=('EncoderPipeline_' + mode))
dag[time_change_splitter] = partitioner[(mode + '_pianoroll_tracks')]
dag[quantizer] = time_change_splitter
dag[transposition_pipeline] = quantizer
dag[pianoroll_extractor] = transposition_pipeline
dag[encoder_pipeline] = pianoroll_extractor
dag[dag_pipeline.DagOutput((mode + '_pianoroll_tracks'))] = encoder_pipeline
return dag_pipeline.DAGPipeline(dag)
|
Returns the Pipeline instance which creates the RNN dataset.
Args:
config: An EventSequenceRnnConfig.
min_steps: Minimum number of steps for an extracted sequence.
max_steps: Maximum number of steps for an extracted sequence.
eval_ratio: Fraction of input to set aside for evaluation set.
Returns:
A pipeline.Pipeline instance.
|
magenta/pipelines/pianoroll_pipeline.py
|
get_pipeline
|
sandutsar/magenta
| 16,143 |
python
|
def get_pipeline(config, min_steps, max_steps, eval_ratio):
'Returns the Pipeline instance which creates the RNN dataset.\n\n Args:\n config: An EventSequenceRnnConfig.\n min_steps: Minimum number of steps for an extracted sequence.\n max_steps: Maximum number of steps for an extracted sequence.\n eval_ratio: Fraction of input to set aside for evaluation set.\n\n Returns:\n A pipeline.Pipeline instance.\n '
transposition_range = list(range((- 4), 5))
partitioner = pipelines_common.RandomPartition(music_pb2.NoteSequence, ['eval_pianoroll_tracks', 'training_pianoroll_tracks'], [eval_ratio])
dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}
for mode in ['eval', 'training']:
time_change_splitter = note_sequence_pipelines.TimeChangeSplitter(name=('TimeChangeSplitter_' + mode))
quantizer = note_sequence_pipelines.Quantizer(steps_per_quarter=config.steps_per_quarter, name=('Quantizer_' + mode))
transposition_pipeline = note_sequence_pipelines.TranspositionPipeline(transposition_range, name=('TranspositionPipeline_' + mode))
pianoroll_extractor = PianorollSequenceExtractor(min_steps=min_steps, max_steps=max_steps, name=('PianorollExtractor_' + mode))
encoder_pipeline = event_sequence_pipeline.EncoderPipeline(PianorollSequence, config.encoder_decoder, name=('EncoderPipeline_' + mode))
dag[time_change_splitter] = partitioner[(mode + '_pianoroll_tracks')]
dag[quantizer] = time_change_splitter
dag[transposition_pipeline] = quantizer
dag[pianoroll_extractor] = transposition_pipeline
dag[encoder_pipeline] = pianoroll_extractor
dag[dag_pipeline.DagOutput((mode + '_pianoroll_tracks'))] = encoder_pipeline
return dag_pipeline.DAGPipeline(dag)
|
def get_pipeline(config, min_steps, max_steps, eval_ratio):
'Returns the Pipeline instance which creates the RNN dataset.\n\n Args:\n config: An EventSequenceRnnConfig.\n min_steps: Minimum number of steps for an extracted sequence.\n max_steps: Maximum number of steps for an extracted sequence.\n eval_ratio: Fraction of input to set aside for evaluation set.\n\n Returns:\n A pipeline.Pipeline instance.\n '
transposition_range = list(range((- 4), 5))
partitioner = pipelines_common.RandomPartition(music_pb2.NoteSequence, ['eval_pianoroll_tracks', 'training_pianoroll_tracks'], [eval_ratio])
dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}
for mode in ['eval', 'training']:
time_change_splitter = note_sequence_pipelines.TimeChangeSplitter(name=('TimeChangeSplitter_' + mode))
quantizer = note_sequence_pipelines.Quantizer(steps_per_quarter=config.steps_per_quarter, name=('Quantizer_' + mode))
transposition_pipeline = note_sequence_pipelines.TranspositionPipeline(transposition_range, name=('TranspositionPipeline_' + mode))
pianoroll_extractor = PianorollSequenceExtractor(min_steps=min_steps, max_steps=max_steps, name=('PianorollExtractor_' + mode))
encoder_pipeline = event_sequence_pipeline.EncoderPipeline(PianorollSequence, config.encoder_decoder, name=('EncoderPipeline_' + mode))
dag[time_change_splitter] = partitioner[(mode + '_pianoroll_tracks')]
dag[quantizer] = time_change_splitter
dag[transposition_pipeline] = quantizer
dag[pianoroll_extractor] = transposition_pipeline
dag[encoder_pipeline] = pianoroll_extractor
dag[dag_pipeline.DagOutput((mode + '_pianoroll_tracks'))] = encoder_pipeline
return dag_pipeline.DAGPipeline(dag)<|docstring|>Returns the Pipeline instance which creates the RNN dataset.
Args:
config: An EventSequenceRnnConfig.
min_steps: Minimum number of steps for an extracted sequence.
max_steps: Maximum number of steps for an extracted sequence.
eval_ratio: Fraction of input to set aside for evaluation set.
Returns:
A pipeline.Pipeline instance.<|endoftext|>
|
6a1ee292af6a86f2fdab70c41389c61af52381742ed273bafdd9a04372d43158
|
def extract_pianoroll_sequences(quantized_sequence, start_step=0, min_steps_discard=None, max_steps_discard=None, max_steps_truncate=None):
'Extracts a polyphonic track from the given quantized NoteSequence.\n\n Currently, this extracts only one pianoroll from a given track.\n\n Args:\n quantized_sequence: A quantized NoteSequence.\n start_step: Start extracting a sequence at this time step. Assumed\n to be the beginning of a bar.\n min_steps_discard: Minimum length of tracks in steps. Shorter tracks are\n discarded.\n max_steps_discard: Maximum length of tracks in steps. Longer tracks are\n discarded. Mutually exclusive with `max_steps_truncate`.\n max_steps_truncate: Maximum length of tracks in steps. Longer tracks are\n truncated. Mutually exclusive with `max_steps_discard`.\n\n Returns:\n pianoroll_seqs: A python list of PianorollSequence instances.\n stats: A dictionary mapping string names to `statistics.Statistic` objects.\n\n Raises:\n ValueError: If both `max_steps_discard` and `max_steps_truncate` are\n specified.\n '
if ((max_steps_discard, max_steps_truncate).count(None) == 0):
raise ValueError('Only one of `max_steps_discard` and `max_steps_truncate` can be specified.')
sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
stats = dict(((stat_name, statistics.Counter(stat_name)) for stat_name in ['pianoroll_tracks_truncated_too_long', 'pianoroll_tracks_discarded_too_short', 'pianoroll_tracks_discarded_too_long', 'pianoroll_tracks_discarded_more_than_1_program']))
steps_per_bar = sequences_lib.steps_per_bar_in_quantized_sequence(quantized_sequence)
stats['pianoroll_track_lengths_in_bars'] = statistics.Histogram('pianoroll_track_lengths_in_bars', [0, 1, 10, 20, 30, 40, 50, 100, 200, 500, 1000])
programs = set()
for note in quantized_sequence.notes:
programs.add(note.program)
if (len(programs) > 1):
stats['pianoroll_tracks_discarded_more_than_1_program'].increment()
return ([], list(stats.values()))
pianoroll_seq = PianorollSequence(quantized_sequence=quantized_sequence, start_step=start_step)
pianoroll_seqs = []
num_steps = pianoroll_seq.num_steps
if ((min_steps_discard is not None) and (num_steps < min_steps_discard)):
stats['pianoroll_tracks_discarded_too_short'].increment()
elif ((max_steps_discard is not None) and (num_steps > max_steps_discard)):
stats['pianoroll_tracks_discarded_too_long'].increment()
else:
if ((max_steps_truncate is not None) and (num_steps > max_steps_truncate)):
stats['pianoroll_tracks_truncated_too_long'].increment()
pianoroll_seq.set_length(max_steps_truncate)
pianoroll_seqs.append(pianoroll_seq)
stats['pianoroll_track_lengths_in_bars'].increment((num_steps // steps_per_bar))
return (pianoroll_seqs, list(stats.values()))
|
Extracts a polyphonic track from the given quantized NoteSequence.
Currently, this extracts only one pianoroll from a given track.
Args:
quantized_sequence: A quantized NoteSequence.
start_step: Start extracting a sequence at this time step. Assumed
to be the beginning of a bar.
min_steps_discard: Minimum length of tracks in steps. Shorter tracks are
discarded.
max_steps_discard: Maximum length of tracks in steps. Longer tracks are
discarded. Mutually exclusive with `max_steps_truncate`.
max_steps_truncate: Maximum length of tracks in steps. Longer tracks are
truncated. Mutually exclusive with `max_steps_discard`.
Returns:
pianoroll_seqs: A python list of PianorollSequence instances.
stats: A dictionary mapping string names to `statistics.Statistic` objects.
Raises:
ValueError: If both `max_steps_discard` and `max_steps_truncate` are
specified.
|
magenta/pipelines/pianoroll_pipeline.py
|
extract_pianoroll_sequences
|
sandutsar/magenta
| 16,143 |
python
|
def extract_pianoroll_sequences(quantized_sequence, start_step=0, min_steps_discard=None, max_steps_discard=None, max_steps_truncate=None):
'Extracts a polyphonic track from the given quantized NoteSequence.\n\n Currently, this extracts only one pianoroll from a given track.\n\n Args:\n quantized_sequence: A quantized NoteSequence.\n start_step: Start extracting a sequence at this time step. Assumed\n to be the beginning of a bar.\n min_steps_discard: Minimum length of tracks in steps. Shorter tracks are\n discarded.\n max_steps_discard: Maximum length of tracks in steps. Longer tracks are\n discarded. Mutually exclusive with `max_steps_truncate`.\n max_steps_truncate: Maximum length of tracks in steps. Longer tracks are\n truncated. Mutually exclusive with `max_steps_discard`.\n\n Returns:\n pianoroll_seqs: A python list of PianorollSequence instances.\n stats: A dictionary mapping string names to `statistics.Statistic` objects.\n\n Raises:\n ValueError: If both `max_steps_discard` and `max_steps_truncate` are\n specified.\n '
if ((max_steps_discard, max_steps_truncate).count(None) == 0):
raise ValueError('Only one of `max_steps_discard` and `max_steps_truncate` can be specified.')
sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
stats = dict(((stat_name, statistics.Counter(stat_name)) for stat_name in ['pianoroll_tracks_truncated_too_long', 'pianoroll_tracks_discarded_too_short', 'pianoroll_tracks_discarded_too_long', 'pianoroll_tracks_discarded_more_than_1_program']))
steps_per_bar = sequences_lib.steps_per_bar_in_quantized_sequence(quantized_sequence)
stats['pianoroll_track_lengths_in_bars'] = statistics.Histogram('pianoroll_track_lengths_in_bars', [0, 1, 10, 20, 30, 40, 50, 100, 200, 500, 1000])
programs = set()
for note in quantized_sequence.notes:
programs.add(note.program)
if (len(programs) > 1):
stats['pianoroll_tracks_discarded_more_than_1_program'].increment()
return ([], list(stats.values()))
pianoroll_seq = PianorollSequence(quantized_sequence=quantized_sequence, start_step=start_step)
pianoroll_seqs = []
num_steps = pianoroll_seq.num_steps
if ((min_steps_discard is not None) and (num_steps < min_steps_discard)):
stats['pianoroll_tracks_discarded_too_short'].increment()
elif ((max_steps_discard is not None) and (num_steps > max_steps_discard)):
stats['pianoroll_tracks_discarded_too_long'].increment()
else:
if ((max_steps_truncate is not None) and (num_steps > max_steps_truncate)):
stats['pianoroll_tracks_truncated_too_long'].increment()
pianoroll_seq.set_length(max_steps_truncate)
pianoroll_seqs.append(pianoroll_seq)
stats['pianoroll_track_lengths_in_bars'].increment((num_steps // steps_per_bar))
return (pianoroll_seqs, list(stats.values()))
|
def extract_pianoroll_sequences(quantized_sequence, start_step=0, min_steps_discard=None, max_steps_discard=None, max_steps_truncate=None):
'Extracts a polyphonic track from the given quantized NoteSequence.\n\n Currently, this extracts only one pianoroll from a given track.\n\n Args:\n quantized_sequence: A quantized NoteSequence.\n start_step: Start extracting a sequence at this time step. Assumed\n to be the beginning of a bar.\n min_steps_discard: Minimum length of tracks in steps. Shorter tracks are\n discarded.\n max_steps_discard: Maximum length of tracks in steps. Longer tracks are\n discarded. Mutually exclusive with `max_steps_truncate`.\n max_steps_truncate: Maximum length of tracks in steps. Longer tracks are\n truncated. Mutually exclusive with `max_steps_discard`.\n\n Returns:\n pianoroll_seqs: A python list of PianorollSequence instances.\n stats: A dictionary mapping string names to `statistics.Statistic` objects.\n\n Raises:\n ValueError: If both `max_steps_discard` and `max_steps_truncate` are\n specified.\n '
if ((max_steps_discard, max_steps_truncate).count(None) == 0):
raise ValueError('Only one of `max_steps_discard` and `max_steps_truncate` can be specified.')
sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
stats = dict(((stat_name, statistics.Counter(stat_name)) for stat_name in ['pianoroll_tracks_truncated_too_long', 'pianoroll_tracks_discarded_too_short', 'pianoroll_tracks_discarded_too_long', 'pianoroll_tracks_discarded_more_than_1_program']))
steps_per_bar = sequences_lib.steps_per_bar_in_quantized_sequence(quantized_sequence)
stats['pianoroll_track_lengths_in_bars'] = statistics.Histogram('pianoroll_track_lengths_in_bars', [0, 1, 10, 20, 30, 40, 50, 100, 200, 500, 1000])
programs = set()
for note in quantized_sequence.notes:
programs.add(note.program)
if (len(programs) > 1):
stats['pianoroll_tracks_discarded_more_than_1_program'].increment()
return ([], list(stats.values()))
pianoroll_seq = PianorollSequence(quantized_sequence=quantized_sequence, start_step=start_step)
pianoroll_seqs = []
num_steps = pianoroll_seq.num_steps
if ((min_steps_discard is not None) and (num_steps < min_steps_discard)):
stats['pianoroll_tracks_discarded_too_short'].increment()
elif ((max_steps_discard is not None) and (num_steps > max_steps_discard)):
stats['pianoroll_tracks_discarded_too_long'].increment()
else:
if ((max_steps_truncate is not None) and (num_steps > max_steps_truncate)):
stats['pianoroll_tracks_truncated_too_long'].increment()
pianoroll_seq.set_length(max_steps_truncate)
pianoroll_seqs.append(pianoroll_seq)
stats['pianoroll_track_lengths_in_bars'].increment((num_steps // steps_per_bar))
return (pianoroll_seqs, list(stats.values()))<|docstring|>Extracts a polyphonic track from the given quantized NoteSequence.
Currently, this extracts only one pianoroll from a given track.
Args:
quantized_sequence: A quantized NoteSequence.
start_step: Start extracting a sequence at this time step. Assumed
to be the beginning of a bar.
min_steps_discard: Minimum length of tracks in steps. Shorter tracks are
discarded.
max_steps_discard: Maximum length of tracks in steps. Longer tracks are
discarded. Mutually exclusive with `max_steps_truncate`.
max_steps_truncate: Maximum length of tracks in steps. Longer tracks are
truncated. Mutually exclusive with `max_steps_discard`.
Returns:
pianoroll_seqs: A python list of PianorollSequence instances.
stats: A dictionary mapping string names to `statistics.Statistic` objects.
Raises:
ValueError: If both `max_steps_discard` and `max_steps_truncate` are
specified.<|endoftext|>
|
bca7698c9d8b821ac5dfdba08ed55f722dd2a9493b1d264a41645a1e56cf0457
|
def lambda_handler(event, context):
'Handle a Pybossa webhook\n\n Parameters\n ----------\n event: dict, required\n\n context: object, required\n Lambda Context runtime methods and attributes\n Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html\n\n Returns\n ------\n '
path = event.get('path', '')
project_slug = os.path.basename(path)
headers = event.get('multiValueHeaders', {})
body = event.get('body', '')
if (('httpMethod' in event) and (event['httpMethod'] == 'GET')):
return simple_response(200, '200 OK', payload='Ready for webhook.')
if (('httpMethod' in event) and (event['httpMethod'] == 'POST')):
try:
webhook_data = json.loads(body)
project_short_name = webhook_data['project_short_name']
project_id = webhook_data['project_id']
task_id = webhook_data['task_id']
taskrun_id = int(webhook_data.get('taskrun_id', (- 1)))
user_id = int(webhook_data.get('user_id', (- 1)))
result_id = webhook_data['result_id']
event = webhook_data['event']
except (json.decoder.JSONDecodeError, KeyError, ValueError, TypeError):
return simple_response(400, 'Bad Request')
if (taskrun_id != (- 1)):
print(f'Notified of taskrun {taskrun_id} by user {user_id} in project {project_short_name}. Crediting to SciStarter project {project_slug}.')
try:
record_participation(taskrun_id, project_slug)
return simple_response(200, '200 OK', payload='Notification sent.')
except Exception as e:
return simple_response(400, 'Bad Request', payload=f'Exception: {str(e)}')
else:
return simple_response(400, 'Bad Request', payload='Webhook did not provide taskrun_id.')
print(path)
print(json.dumps(headers))
print(body)
return simple_response(400, 'Bad Request')
|
Handle a Pybossa webhook
Parameters
----------
event: dict, required
context: object, required
Lambda Context runtime methods and attributes
Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Returns
------
|
vdashboard/app.py
|
lambda_handler
|
Goodly/volunteerdashboard
| 0 |
python
|
def lambda_handler(event, context):
'Handle a Pybossa webhook\n\n Parameters\n ----------\n event: dict, required\n\n context: object, required\n Lambda Context runtime methods and attributes\n Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html\n\n Returns\n ------\n '
path = event.get('path', '')
project_slug = os.path.basename(path)
headers = event.get('multiValueHeaders', {})
body = event.get('body', '')
if (('httpMethod' in event) and (event['httpMethod'] == 'GET')):
return simple_response(200, '200 OK', payload='Ready for webhook.')
if (('httpMethod' in event) and (event['httpMethod'] == 'POST')):
try:
webhook_data = json.loads(body)
project_short_name = webhook_data['project_short_name']
project_id = webhook_data['project_id']
task_id = webhook_data['task_id']
taskrun_id = int(webhook_data.get('taskrun_id', (- 1)))
user_id = int(webhook_data.get('user_id', (- 1)))
result_id = webhook_data['result_id']
event = webhook_data['event']
except (json.decoder.JSONDecodeError, KeyError, ValueError, TypeError):
return simple_response(400, 'Bad Request')
if (taskrun_id != (- 1)):
print(f'Notified of taskrun {taskrun_id} by user {user_id} in project {project_short_name}. Crediting to SciStarter project {project_slug}.')
try:
record_participation(taskrun_id, project_slug)
return simple_response(200, '200 OK', payload='Notification sent.')
except Exception as e:
return simple_response(400, 'Bad Request', payload=f'Exception: {str(e)}')
else:
return simple_response(400, 'Bad Request', payload='Webhook did not provide taskrun_id.')
print(path)
print(json.dumps(headers))
print(body)
return simple_response(400, 'Bad Request')
|
def lambda_handler(event, context):
'Handle a Pybossa webhook\n\n Parameters\n ----------\n event: dict, required\n\n context: object, required\n Lambda Context runtime methods and attributes\n Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html\n\n Returns\n ------\n '
path = event.get('path', '')
project_slug = os.path.basename(path)
headers = event.get('multiValueHeaders', {})
body = event.get('body', '')
if (('httpMethod' in event) and (event['httpMethod'] == 'GET')):
return simple_response(200, '200 OK', payload='Ready for webhook.')
if (('httpMethod' in event) and (event['httpMethod'] == 'POST')):
try:
webhook_data = json.loads(body)
project_short_name = webhook_data['project_short_name']
project_id = webhook_data['project_id']
task_id = webhook_data['task_id']
taskrun_id = int(webhook_data.get('taskrun_id', (- 1)))
user_id = int(webhook_data.get('user_id', (- 1)))
result_id = webhook_data['result_id']
event = webhook_data['event']
except (json.decoder.JSONDecodeError, KeyError, ValueError, TypeError):
return simple_response(400, 'Bad Request')
if (taskrun_id != (- 1)):
print(f'Notified of taskrun {taskrun_id} by user {user_id} in project {project_short_name}. Crediting to SciStarter project {project_slug}.')
try:
record_participation(taskrun_id, project_slug)
return simple_response(200, '200 OK', payload='Notification sent.')
except Exception as e:
return simple_response(400, 'Bad Request', payload=f'Exception: {str(e)}')
else:
return simple_response(400, 'Bad Request', payload='Webhook did not provide taskrun_id.')
print(path)
print(json.dumps(headers))
print(body)
return simple_response(400, 'Bad Request')<|docstring|>Handle a Pybossa webhook
Parameters
----------
event: dict, required
context: object, required
Lambda Context runtime methods and attributes
Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Returns
------<|endoftext|>
|
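For local exercise of the handler above, the event only needs the fields the body actually reads; everything below is a hypothetical example payload (record_participation and simple_response come from the same module and would still need real SciStarter credentials to succeed):

import json

event = {
    'httpMethod': 'POST',
    'path': '/webhooks/example-scistarter-project',   # basename becomes the project slug
    'multiValueHeaders': {},
    'body': json.dumps({
        'project_short_name': 'demo',
        'project_id': 1,
        'task_id': 42,
        'taskrun_id': 1001,
        'user_id': 7,
        'result_id': 99,
        'event': 'task_completed',
    }),
}
response = lambda_handler(event, None)   # should try to credit taskrun 1001 to 'example-scistarter-project'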
e5a7f3bf9aecfbae28731af6a7155dbbe6f1959b839f3169707aec8c7d5ddbce
|
def test_collation(ldml):
' Test that index exemplar is sorted according to the default collation '
if iscldr(ldml):
return
filename = os.path.basename(ldml.ldml.fname)
defcol = ldml.ldml.root.find('.//collations/defaultCollation')
defcollation = (defcol.text if (defcol is not None) else 'standard')
col_el = ldml.ldml.root.find(((".//collations/collation[@type='" + defcollation) + "']/cr"))
col = (col_el.text if (col_el is not None) else '')
try:
rbc = icu.RuleBasedCollator(col)
except icu.ICUError:
assert False, (filename + ' has invalid ICU collation')
return
index_el = ldml.ldml.root.find('.//characters/exemplarCharacters[@type="index"]')
if (index_el == None):
return
index_list_raw = index_el.text[1:(- 1)].strip().split(' ')
index_list = [re.sub(curlybraces, '\\1', c) for c in index_list_raw]
sort_list = sorted(index_list, key=rbc.getSortKey)
assert (index_list == sort_list), (filename + ' index exemplar inconsistent with collation')
|
Test that index exemplar is sorted according to the default collation
|
tests/test_collation.py
|
test_collation
|
silnrsi/sldr
| 11 |
python
|
def test_collation(ldml):
' '
if iscldr(ldml):
return
filename = os.path.basename(ldml.ldml.fname)
defcol = ldml.ldml.root.find('.//collations/defaultCollation')
defcollation = (defcol.text if (defcol is not None) else 'standard')
col_el = ldml.ldml.root.find(((".//collations/collation[@type='" + defcollation) + "']/cr"))
col = (col_el.text if (col_el is not None) else '')
try:
rbc = icu.RuleBasedCollator(col)
except icu.ICUError:
assert False, (filename + ' has invalid ICU collation')
return
index_el = ldml.ldml.root.find('.//characters/exemplarCharacters[@type="index"]')
if (index_el == None):
return
index_list_raw = index_el.text[1:(- 1)].strip().split(' ')
index_list = [re.sub(curlybraces, '\\1', c) for c in index_list_raw]
sort_list = sorted(index_list, key=rbc.getSortKey)
assert (index_list == sort_list), (filename + ' index exemplar inconsistent with collation')
|
def test_collation(ldml):
' '
if iscldr(ldml):
return
filename = os.path.basename(ldml.ldml.fname)
defcol = ldml.ldml.root.find('.//collations/defaultCollation')
defcollation = (defcol.text if (defcol is not None) else 'standard')
col_el = ldml.ldml.root.find(((".//collations/collation[@type='" + defcollation) + "']/cr"))
col = (col_el.text if (col_el is not None) else '')
try:
rbc = icu.RuleBasedCollator(col)
except icu.ICUError:
assert False, (filename + ' has invalid ICU collation')
return
index_el = ldml.ldml.root.find('.//characters/exemplarCharacters[@type="index"]')
if (index_el == None):
return
index_list_raw = index_el.text[1:(- 1)].strip().split(' ')
index_list = [re.sub(curlybraces, '\\1', c) for c in index_list_raw]
sort_list = sorted(index_list, key=rbc.getSortKey)
assert (index_list == sort_list), (filename + ' index exemplar inconsistent with collation')<|docstring|>Test that index exemplar is sorted according to the default collation<|endoftext|>
|
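A small illustration of the PyICU calls the test above relies on (a sketch; the tailoring rule is a hypothetical example, not one from SLDR data): a RuleBasedCollator built from a rule string drives sorted() through getSortKey, exactly as in the assert above.

import icu

collator = icu.RuleBasedCollator('&b < a')                 # tailor 'a' to sort right after 'b'
print(sorted(['a', 'b', 'c'], key=collator.getSortKey))    # expected: ['b', 'a', 'c']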