_id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 31-13.1k chars) | language (string, 1 class) | meta_information (dict) |
---|---|---|---|---|---|
q275600
|
Checklist.update_checklist
|
test
|
def update_checklist(self, name):
'''
Update the current checklist. Returns a new Checklist object.
'''
checklist_json = self.fetch_json(
uri_path=self.base_uri,
|
python
|
{
"resource": ""
}
|
q275601
|
Checklist.add_item
|
test
|
def add_item(self, query_params=None):
'''
Add an item to this checklist. Returns a dictionary of values of new
item.
'''
return self.fetch_json(
|
python
|
{
"resource": ""
}
|
q275602
|
Checklist.remove_item
|
test
|
def remove_item(self, item_id):
'''
Deletes an item from this checklist.
'''
return self.fetch_json(
|
python
|
{
"resource": ""
}
|
q275603
|
ChecklistItem.update_name
|
test
|
def update_name( self, name ):
"""
Rename the current checklist item. Returns a new ChecklistItem object.
"""
checklistitem_json = self.fetch_json(
uri_path = self.base_uri + '/name',
http_method = 'PUT',
|
python
|
{
"resource": ""
}
|
q275604
|
ChecklistItem.update_state
|
test
|
def update_state(self, state):
"""
Set the state of the current checklist item. Returns a new ChecklistItem object.
"""
checklistitem_json = self.fetch_json(
uri_path = self.base_uri + '/state',
http_method = 'PUT',
|
python
|
{
"resource": ""
}
|
q275605
|
Client.add_authorisation
|
test
|
def add_authorisation(self, query_params):
'''
Adds the API key and user auth token to the query parameters
'''
query_params['key']
|
python
|
{
"resource": ""
}
|
q275606
|
Client.check_errors
|
test
|
def check_errors(self, uri, response):
'''
Check HTTP response for known errors
'''
if response.status == 401:
raise trolly.Unauthorised(uri, response)
|
python
|
{
"resource": ""
}
|
q275607
|
Client.build_uri
|
test
|
def build_uri(self, path, query_params):
'''
Build the URI for the API call.
'''
url =
|
python
|
{
"resource": ""
}
|
q275608
|
Client.fetch_json
|
test
|
def fetch_json(self, uri_path, http_method='GET', query_params=None,
body=None, headers=None):
'''
Make a call to Trello API and capture JSON response. Raises an error
when it fails.
Returns:
dict: Dictionary with the JSON data
'''
query_params = query_params or {}
headers = headers or {}
query_params = self.add_authorisation(query_params)
|
python
|
{
"resource": ""
}
|
q275609
|
Client.create_organisation
|
test
|
def create_organisation(self, organisation_json):
'''
Create an Organisation object from a JSON object
Returns:
Organisation: The organisation from the given `organisation_json`.
'''
return trolly.organisation.Organisation(
trello_client=self,
|
python
|
{
"resource": ""
}
|
q275610
|
Client.create_board
|
test
|
def create_board(self, board_json):
'''
Create Board object from a JSON object
Returns:
Board: The board from the given `board_json`.
'''
return trolly.board.Board(
trello_client=self,
|
python
|
{
"resource": ""
}
|
q275611
|
Client.create_label
|
test
|
def create_label(self, label_json):
'''
Create Label object from JSON object
Returns:
Label: The label from the given `label_json`.
'''
return trolly.label.Label(
trello_client=self,
|
python
|
{
"resource": ""
}
|
q275612
|
Client.create_list
|
test
|
def create_list(self, list_json):
'''
Create List object from JSON object
Returns:
List: The list from the given `list_json`.
'''
return trolly.list.List(
trello_client=self,
|
python
|
{
"resource": ""
}
|
q275613
|
Client.create_card
|
test
|
def create_card(self, card_json):
'''
Create a Card object from JSON object
Returns:
Card: The card from the given `card_json`.
'''
return trolly.card.Card(
trello_client=self,
|
python
|
{
"resource": ""
}
|
q275614
|
Client.create_checklist
|
test
|
def create_checklist(self, checklist_json):
'''
Create a Checklist object from JSON object
Returns:
Checklist: The checklist from the given `checklist_json`.
'''
return trolly.checklist.Checklist(
trello_client=self,
|
python
|
{
"resource": ""
}
|
q275615
|
Client.create_member
|
test
|
def create_member(self, member_json):
'''
Create a Member object from JSON object
Returns:
Member: The member from the given `member_json`.
'''
return trolly.member.Member(
trello_client=self,
|
python
|
{
"resource": ""
}
|
q275616
|
Client.get_organisation
|
test
|
def get_organisation(self, id, name=None):
'''
Get an organisation
Returns:
Organisation: The organisation with the given
|
python
|
{
"resource": ""
}
|
q275617
|
Client.get_board
|
test
|
def get_board(self, id, name=None):
'''
Get a board
Returns:
Board: The board with the given
|
python
|
{
"resource": ""
}
|
q275618
|
Client.get_list
|
test
|
def get_list(self, id, name=None):
'''
Get a list
Returns:
List: The list with the given
|
python
|
{
"resource": ""
}
|
q275619
|
Client.get_card
|
test
|
def get_card(self, id, name=None):
'''
Get a card
Returns:
Card: The card with the given
|
python
|
{
"resource": ""
}
|
q275620
|
Client.get_checklist
|
test
|
def get_checklist(self, id, name=None):
'''
Get a checklist
Returns:
Checklist: The checklist with the given
|
python
|
{
"resource": ""
}
|
q275621
|
Client.get_member
|
test
|
def get_member(self, id='me', name=None):
'''
Get a member or your current member if `id` wasn't given.
Returns:
Member: The member with the given `id`, defaults to the
|
python
|
{
"resource": ""
}
|
q275622
|
domain_from_url
|
test
|
def domain_from_url(url):
"""
Get the root domain from a url.
Will prune away query strings, url paths, protocol prefix and sub-domains.
Exceptions will be raised on invalid urls.
"""
|
python
|
{
"resource": ""
}
|
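The `domain_from_url` body above is truncated. Below is a minimal standard-library sketch of the behaviour its docstring describes; the `_sketch` name and the last-two-labels heuristic are assumptions, not the dataset's implementation:

```python
from urllib.parse import urlparse

def domain_from_url_sketch(url):
    # Prune protocol, path and query via urlparse; raise on unparseable input.
    netloc = urlparse(url).netloc
    if not netloc or '.' not in netloc:
        raise ValueError('invalid url: %s' % url)
    # Naive root-domain heuristic: keep the last two labels.
    # (Does not handle multi-part TLDs such as .co.uk.)
    return '.'.join(netloc.split('.')[-2:])

assert domain_from_url_sketch('https://sub.example.com/a/b?q=1') == 'example.com'
```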
q275623
|
to_raw_text_markupless
|
test
|
def to_raw_text_markupless(text, keep_whitespace=False, normalize_ascii=True):
"""
A generator to convert raw text segments, without xml, to a
list of words without any markup.
Additionally dates are replaced by `7777` for normalization.
Arguments
---------
text: str, input text to tokenize, strip of markup.
keep_whitespace : bool, should the output retain the
whitespace of the input (so that char offsets in the
output correspond to those in the input).
Returns
-------
|
python
|
{
"resource": ""
}
|
q275624
|
to_raw_text
|
test
|
def to_raw_text(text, keep_whitespace=False, normalize_ascii=True):
"""
A generator to convert raw text segments, with xml, and other
non-textual content to a list of words without any markup.
Additionally dates are replaced by `7777` for normalization.
Arguments
---------
text: str, input text to tokenize, strip of markup.
keep_whitespace : bool, should the output retain the
whitespace of the input (so that char offsets in the
output correspond to those in the input).
Returns
-------
generator<list<list<str>>>, a generator for sentences, with
within each sentence a list of the words separated.
"""
out = text
out = _remove_urls(text)
|
python
|
{
"resource": ""
}
|
q275625
|
to_raw_text_pairings
|
test
|
def to_raw_text_pairings(text, keep_whitespace=False, normalize_ascii=True):
"""
A generator to convert raw text segments, with xml, and other
non-textual content to a list of words without any markup.
Additionally dates are replaced by `7777` for normalization,
along with wikipedia anchors kept.
Arguments
---------
text: str, input text to tokenize, strip of markup.
keep_whitespace : bool, should the output retain the
whitespace of the input (so that char offsets in the
output correspond to those in the input).
Returns
-------
generator<list<list<str>>>, a generator for sentences, with
|
python
|
{
"resource": ""
}
|
q275626
|
Keyring.set_password
|
test
|
def set_password(self, service, username, password):
"""Write the password in the file.
"""
assoc = self._generate_assoc(service, username)
# encrypt the password
|
python
|
{
"resource": ""
}
|
q275627
|
split_with_locations
|
test
|
def split_with_locations(text, locations):
"""
Use an integer list to split the string
contained in `text`.
Arguments:
----------
text : str, same length as locations.
locations : list<int>, contains values
'SHOULD_SPLIT', 'UNDECIDED', and
'SHOULD_NOT_SPLIT'. Will create
strings between each 'SHOULD_SPLIT'
locations.
Returns:
--------
Generator<str> : the substrings of text
corresponding to the slices given
|
python
|
{
"resource": ""
}
|
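A runnable sketch of the splitting generator described above; the integer marker values are assumptions, since the truncated text only names the constants:

```python
SHOULD_SPLIT, UNDECIDED, SHOULD_NOT_SPLIT = 2, 1, 0  # assumed values

def split_with_locations_sketch(text, locations):
    # Yield the substrings of `text` between consecutive SHOULD_SPLIT marks.
    start = 0
    for i, loc in enumerate(locations):
        if loc == SHOULD_SPLIT and i > start:
            yield text[start:i]
            start = i
    if start < len(text):
        yield text[start:]

locs = [UNDECIDED] * 5
locs[3] = SHOULD_SPLIT
print(list(split_with_locations_sketch("ab cd", locs)))  # ['ab ', 'cd']
```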
q275628
|
mark_regex
|
test
|
def mark_regex(regex, text, split_locations):
"""
Regex that adds a 'SHOULD_SPLIT' marker at the end
location of each matching group of the given regex.
Arguments
---------
regex : re.Expression
text : str, same length as split_locations
split_locations : list<int>, split decisions.
"""
|
python
|
{
"resource": ""
}
|
q275629
|
mark_begin_end_regex
|
test
|
def mark_begin_end_regex(regex, text, split_locations):
"""
Regex that adds a 'SHOULD_SPLIT' marker at the end
location of each matching group of the given regex,
and adds a 'SHOULD_SPLIT' at the beginning of the
matching group. Each character within the matching
group will be marked as 'SHOULD_NOT_SPLIT'.
Arguments
---------
regex : re.Expression
text : str, same length as split_locations
split_locations : list<int>, split decisions.
"""
for match in regex.finditer(text):
end_match = match.end()
begin_match = match.start()
for i in
|
python
|
{
"resource": ""
}
|
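Both marker functions above stop mid-loop. A sketch of the begin/end variant, under the same assumed marker constants as the splitting sketch earlier:

```python
import re

SHOULD_SPLIT, UNDECIDED, SHOULD_NOT_SPLIT = 2, 1, 0  # assumed values

def mark_begin_end_regex_sketch(regex, text, split_locations):
    # SHOULD_SPLIT at each match boundary, SHOULD_NOT_SPLIT inside the match.
    for match in regex.finditer(text):
        begin, end = match.start(), match.end()
        for i in range(begin + 1, end):
            split_locations[i] = SHOULD_NOT_SPLIT
        split_locations[begin] = SHOULD_SPLIT
        if end < len(split_locations):
            split_locations[end] = SHOULD_SPLIT

locs = [UNDECIDED] * 7
mark_begin_end_regex_sketch(re.compile(r"\d+"), "ab123cd", locs)
print(locs)  # [1, 1, 2, 0, 0, 2, 1]
```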
q275630
|
main
|
test
|
def main(argv=None):
"""Main command line interface."""
if argv is None:
argv = sys.argv[1:]
cli = CommandLineTool()
try:
|
python
|
{
"resource": ""
}
|
q275631
|
ArgonAESEncryption._create_cipher
|
test
|
def _create_cipher(self, password, salt, nonce = None):
"""
Create the cipher object to encrypt or decrypt a payload.
"""
from argon2.low_level import hash_secret_raw, Type
from Crypto.Cipher import AES
aesmode = self._get_mode(self.aesmode)
if aesmode is None:
|
python
|
{
"resource": ""
}
|
q275632
|
ArgonAESEncryption._get_mode
|
test
|
def _get_mode(mode = None):
"""
Return the AES mode, or a list of valid AES modes, if mode == None
"""
from Crypto.Cipher import AES
AESModeMap = {
'CCM': AES.MODE_CCM,
'EAX': AES.MODE_EAX,
|
python
|
{
"resource": ""
}
|
q275633
|
CryptFileKeyring.priority
|
test
|
def priority(self):
"""
Applicable on all platforms where the schemes that are integrated
with your environment do not fit.
"""
try:
__import__('argon2.low_level')
except ImportError: # pragma: no cover
|
python
|
{
"resource": ""
}
|
q275634
|
CryptFileKeyring._check_scheme
|
test
|
def _check_scheme(self, config):
"""
check for a valid scheme
raise AttributeError if missing
raise ValueError if not valid
"""
try:
scheme = config.get(
escape_for_ini('keyring-setting'),
escape_for_ini('scheme'),
)
except (configparser.NoSectionError, configparser.NoOptionError):
raise AttributeError("Encryption scheme missing")
# extract AES mode
aesmode = scheme[-3:]
if aesmode not in self._get_mode():
raise ValueError("Encryption scheme invalid: %s" % (aesmode))
# setup AES mode
|
python
|
{
"resource": ""
}
|
q275635
|
MQTTService.onPublish
|
test
|
def onPublish(self, topic, payload, qos, dup, retain, msgId):
'''
Callback Receiving messages from publisher
|
python
|
{
"resource": ""
}
|
q275636
|
MQTTFactory.makeId
|
test
|
def makeId(self):
'''Produce ids for Protocol packets, outliving their sessions'''
self.id = (self.id + 1) % 65536
|
python
|
{
"resource": ""
}
|
q275637
|
BaseState.connect
|
test
|
def connect(self, request):
'''
Send a CONNECT control packet.
'''
state = self.__class__.__name__
|
python
|
{
"resource": ""
}
|
q275638
|
BaseState.handleCONNACK
|
test
|
def handleCONNACK(self, response):
'''
Handles CONNACK packet from the server
'''
state = self.__class__.__name__
|
python
|
{
"resource": ""
}
|
q275639
|
encodeString
|
test
|
def encodeString(string):
'''
Encode a UTF-8 string into MQTT format.
Returns a bytearray
'''
encoded = bytearray(2)
encoded.extend(bytearray(string, encoding='utf-8'))
l = len(encoded)-2
if(l
|
python
|
{
"resource": ""
}
|
q275640
|
decodeString
|
test
|
def decodeString(encoded):
'''
Decodes a UTF-8 string from an encoded MQTT bytearray.
Returns the decoded string and the remaining bytearray to be parsed.
'''
length =
|
python
|
{
"resource": ""
}
|
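Both string helpers above are cut off mid-body. A self-contained round-trip sketch of the convention their docstrings describe, a big-endian two-byte length prefix followed by the UTF-8 bytes; the `_sketch` names are assumptions:

```python
def encode_string_sketch(string):
    # Two-byte big-endian length prefix, then the UTF-8 payload.
    payload = string.encode('utf-8')
    if len(payload) > 65535:
        raise ValueError('encoded string exceeds 65535 bytes')
    return bytearray(len(payload).to_bytes(2, 'big')) + payload

def decode_string_sketch(encoded):
    # Returns the decoded string and the remaining bytearray to be parsed.
    length = int.from_bytes(encoded[:2], 'big')
    return encoded[2:2 + length].decode('utf-8'), encoded[2 + length:]

s, rest = decode_string_sketch(encode_string_sketch('MQTT') + b'tail')
assert s == 'MQTT' and bytes(rest) == b'tail'
```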
q275641
|
encode16Int
|
test
|
def encode16Int(value):
'''
Encodes a 16 bit unsigned integer into MQTT format.
Returns a bytearray
'''
value = int(value)
|
python
|
{
"resource": ""
}
|
q275642
|
encodeLength
|
test
|
def encodeLength(value):
'''
Encodes value into a multibyte sequence defined by MQTT protocol.
Used to encode packet length fields.
'''
encoded = bytearray()
while True:
digit = value % 128
value //= 128
|
python
|
{
"resource": ""
}
|
q275643
|
decodeLength
|
test
|
def decodeLength(encoded):
'''
Decodes a variable length value defined in the MQTT protocol.
This value typically represents remaining field lengths
'''
value = 0
multiplier = 1
for i in encoded:
|
python
|
{
"resource": ""
}
|
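The encode/decode pair above is truncated at the interesting part. A complete round-trip sketch of the MQTT variable-length scheme both docstrings describe: 7 data bits per byte, high bit set while more bytes follow:

```python
def encode_length_sketch(value):
    encoded = bytearray()
    while True:
        digit = value % 128
        value //= 128
        if value > 0:
            digit |= 0x80  # continuation bit
        encoded.append(digit)
        if value == 0:
            return encoded

def decode_length_sketch(encoded):
    value, multiplier = 0, 1
    for byte in encoded:
        value += (byte & 0x7F) * multiplier
        if not byte & 0x80:
            break
        multiplier *= 128
    return value

assert decode_length_sketch(encode_length_sketch(321)) == 321  # 0xC1 0x02
```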
q275644
|
DISCONNECT.encode
|
test
|
def encode(self):
'''
Encode and store a DISCONNECT control packet.
'''
|
python
|
{
"resource": ""
}
|
q275645
|
CONNECT.encode
|
test
|
def encode(self):
'''
Encode and store a CONNECT control packet.
@raise e: C{ValueError} if any encoded topic string exceeds 65535 bytes.
@raise e: C{ValueError} if encoded username string exceeds 65535 bytes.
'''
header = bytearray(1)
varHeader = bytearray()
payload = bytearray()
header[0] = 0x10 # packet code
# ---- Variable header encoding section -----
varHeader.extend(encodeString(self.version['tag']))
varHeader.append(self.version['level']) # protocol Level
flags = (self.cleanStart << 1)
if self.willTopic is not None and self.willMessage is not None:
flags |= 0x04 | (self.willRetain << 5) | (self.willQoS << 3)
if self.username is not None:
flags |= 0x80
if self.password is not None:
flags |= 0x40
varHeader.append(flags)
varHeader.extend(encode16Int(self.keepalive))
# ------ Payload encoding section ----
payload.extend(encodeString(self.clientId))
if self.willTopic is not None and self.willMessage is not None:
payload.extend(encodeString(self.willTopic))
|
python
|
{
"resource": ""
}
|
q275646
|
CONNECT.decode
|
test
|
def decode(self, packet):
'''
Decode a CONNECT control packet.
'''
self.encoded = packet
# Strip the fixed header plus variable length field
lenLen = 1
while packet[lenLen] & 0x80:
lenLen += 1
packet_remaining = packet[lenLen+1:]
# Variable Header
version_str, packet_remaining = decodeString(packet_remaining)
version_id = int(packet_remaining[0])
if version_id == v31['level']:
self.version = v31
else:
self.version = v311
flags = packet_remaining[1]
self.cleanStart = (flags & 0x02) != 0
willFlag = (flags & 0x04) != 0
willQoS = (flags >> 3) & 0x03
willRetain = (flags & 0x20) != 0
userFlag = (flags & 0x80) != 0
passFlag = (flags & 0x40) != 0
packet_remaining = packet_remaining[2:]
self.keepalive = decode16Int(packet_remaining)
# Payload
packet_remaining = packet_remaining[2:]
self.clientId, packet_remaining = decodeString(packet_remaining)
|
python
|
{
"resource": ""
}
|
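The `lenLen` loop that opens this and the following decoders strips the fixed header plus the variable-length remaining-length field. A standalone sketch of that shared step; the helper name is an assumption:

```python
def strip_fixed_header_sketch(packet):
    # The remaining-length field spans 1-4 bytes; a set high bit
    # means another length byte follows (cf. decodeLength above).
    len_len = 1
    while packet[len_len] & 0x80:
        len_len += 1
    return packet[len_len + 1:]

# A CONNACK packet 0x20 0x02 0x00 0x00 leaves the variable header b'\x00\x00'.
assert bytes(strip_fixed_header_sketch(bytearray(b'\x20\x02\x00\x00'))) == b'\x00\x00'
```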
q275647
|
CONNACK.encode
|
test
|
def encode(self):
'''
Encode and store a CONNACK control packet.
'''
header = bytearray(1)
varHeader = bytearray(2)
header[0] = 0x20
varHeader[0] = self.session
varHeader[1] = self.resultCode
|
python
|
{
"resource": ""
}
|
q275648
|
CONNACK.decode
|
test
|
def decode(self, packet):
'''
Decode a CONNACK control packet.
'''
self.encoded = packet
# Strip the fixed header plus variable length field
lenLen = 1
while packet[lenLen] & 0x80:
|
python
|
{
"resource": ""
}
|
q275649
|
SUBSCRIBE.decode
|
test
|
def decode(self, packet):
'''
Decode a SUBSCRIBE control packet.
'''
self.encoded = packet
lenLen = 1
while packet[lenLen] & 0x80:
lenLen += 1
packet_remaining = packet[lenLen+1:]
self.msgId = decode16Int(packet_remaining[0:2])
self.topics = []
packet_remaining = packet_remaining[2:]
|
python
|
{
"resource": ""
}
|
q275650
|
SUBACK.encode
|
test
|
def encode(self):
'''
Encode and store a SUBACK control packet.
'''
header = bytearray(1)
payload = bytearray()
varHeader = encode16Int(self.msgId)
header[0] = 0x90
for code in self.granted:
payload.append(code[0] | (0x80 if code[1] == True else 0x00))
|
python
|
{
"resource": ""
}
|
q275651
|
UNSUBSCRIBE.encode
|
test
|
def encode(self):
'''
Encode and store an UNSUBSCRIBE control packet
@raise e: C{ValueError} if any encoded topic string exceeds 65535 bytes
'''
header = bytearray(1)
payload = bytearray()
varHeader = encode16Int(self.msgId)
header[0] = 0xA2 # packet with QoS=1
for topic in self.topics:
|
python
|
{
"resource": ""
}
|
q275652
|
UNSUBSCRIBE.decode
|
test
|
def decode(self, packet):
'''
Decode an UNSUBSCRIBE control packet.
'''
self.encoded = packet
lenLen = 1
while packet[lenLen] & 0x80:
lenLen += 1
packet_remaining = packet[lenLen+1:]
self.msgId = decode16Int(packet_remaining[0:2])
self.topics = []
packet_remaining = packet_remaining[2:]
|
python
|
{
"resource": ""
}
|
q275653
|
UNSUBACK.encode
|
test
|
def encode(self):
'''
Encode and store an UNSUBACK control packet
'''
header = bytearray(1)
varHeader = encode16Int(self.msgId)
header[0] = 0xB0
|
python
|
{
"resource": ""
}
|
q275654
|
PUBLISH.encode
|
test
|
def encode(self):
'''
Encode and store a PUBLISH control packet.
@raise e: C{ValueError} if encoded topic string exceeds 65535 bytes.
@raise e: C{ValueError} if encoded packet size exceeds 268435455 bytes.
@raise e: C{TypeError} if C{data} is not a string, bytearray, int, boolean or float.
'''
header = bytearray(1)
varHeader = bytearray()
payload = bytearray()
if self.qos:
header[0] = 0x30 | self.retain | (self.qos << 1) | (self.dup << 3)
varHeader.extend(encodeString(self.topic)) # topic name
varHeader.extend(encode16Int(self.msgId)) # msgId should not be None
else:
header[0] = 0x30 | self.retain
varHeader.extend(encodeString(self.topic)) # topic name
|
python
|
{
"resource": ""
}
|
q275655
|
PUBLISH.decode
|
test
|
def decode(self, packet):
'''
Decode a PUBLISH control packet.
'''
self.encoded = packet
lenLen = 1
while packet[lenLen] & 0x80:
lenLen += 1
packet_remaining = packet[lenLen+1:]
self.dup = (packet[0] & 0x08) == 0x08
self.qos = (packet[0] & 0x06) >> 1
self.retain = (packet[0] & 0x01) == 0x01
self.topic, _ = decodeString(packet_remaining)
topicLen = decode16Int(packet_remaining)
if self.qos:
|
python
|
{
"resource": ""
}
|
q275656
|
PUBREL.decode
|
test
|
def decode(self, packet):
'''
Decode a PUBREL control packet.
'''
self.encoded = packet
lenLen = 1
while packet[lenLen] & 0x80:
lenLen += 1
|
python
|
{
"resource": ""
}
|
q275657
|
API.get_url
|
test
|
def get_url(self, method=None, **kwargs):
"""Return url for call method.
:param method (optional): `str` method name.
:returns: `str` URL.
"""
|
python
|
{
"resource": ""
}
|
q275658
|
API.request
|
test
|
def request(self, method, **kwargs):
"""
Send request to API.
:param method: `str` method name.
:returns: `dict` response.
"""
kwargs.setdefault('v', self.__version)
if self.__token is not None:
|
python
|
{
"resource": ""
}
|
q275659
|
FileMPI.refresh
|
test
|
def refresh(self):
""" Refresh the list of blocks to the disk, collectively """
if self.comm.rank == 0:
|
python
|
{
"resource": ""
}
|
q275660
|
classifier.format_data
|
test
|
def format_data(self, data, scale=True):
"""
Function for converting a dict to an array suitable for sklearn.
Parameters
----------
data : dict
A dict of data, containing all elements of
`analytes` as items.
scale : bool
Whether or not to scale the data. Should always be
`True`, unless used by `classifier.fitting_data`
where a scaler hasn't been created yet.
Returns
-------
|
python
|
{
"resource": ""
}
|
q275661
|
classifier.fitting_data
|
test
|
def fitting_data(self, data):
"""
Function to format data for cluster fitting.
Parameters
----------
data : dict
A dict of data, containing all elements of
`analytes` as items.
Returns
-------
A data array for initial cluster fitting.
|
python
|
{
"resource": ""
}
|
q275662
|
classifier.fit_kmeans
|
test
|
def fit_kmeans(self, data, n_clusters, **kwargs):
"""
Fit KMeans clustering algorithm to data.
Parameters
----------
data : array-like
A dataset formatted by `classifier.fitting_data`.
|
python
|
{
"resource": ""
}
|
q275663
|
classifier.fit_meanshift
|
test
|
def fit_meanshift(self, data, bandwidth=None, bin_seeding=False, **kwargs):
"""
Fit MeanShift clustering algorithm to data.
Parameters
----------
data : array-like
A dataset formatted by `classifier.fitting_data`.
bandwidth : float
The bandwidth value used during clustering.
If None, determined automatically. Note:
the data are scaled before clustering, so
this is not in the same units as the data.
bin_seeding : bool
Whether or not to use 'bin_seeding'. See
|
python
|
{
"resource": ""
}
|
q275664
|
classifier.fit
|
test
|
def fit(self, data, method='kmeans', **kwargs):
"""
fit classifiers from large dataset.
Parameters
----------
data : dict
A dict of data for clustering. Must contain
items with the same name as analytes used for
clustering.
method : str
A string defining the clustering method used. Can be:
* 'kmeans' : K-Means clustering algorithm
* 'meanshift' : Meanshift algorithm
n_clusters : int
*K-Means only*. The number of clusters to identify
bandwidth : float
*Meanshift only.*
The bandwidth value used during clustering.
If None, determined automatically. Note:
the data are scaled before clustering, so
this is not in the same units as the data.
bin_seeding : bool
*Meanshift only.*
Whether or not to use 'bin_seeding'. See
documentation for `sklearn.cluster.MeanShift`.
**kwargs :
passed to `sklearn.cluster.MeanShift`.
Returns
-------
|
python
|
{
"resource": ""
}
|
q275665
|
classifier.predict
|
test
|
def predict(self, data):
"""
Label new data with cluster identities.
Parameters
----------
data : dict
A data dict containing the same analytes used to
fit the classifier.
sort_by : str
The name of an analyte used to sort the resulting
clusters. If None, defaults to the first analyte
used in fitting.
Returns
-------
array of clusters the same length as the data.
|
python
|
{
"resource": ""
}
|
q275666
|
classifier.map_clusters
|
test
|
def map_clusters(self, size, sampled, clusters):
"""
Translate cluster identity back to original data size.
Parameters
----------
size : int
size of original dataset
sampled : array-like
integer array describing location of finite values
in original data.
clusters : array-like
integer array of cluster identities
Returns
-------
|
python
|
{
"resource": ""
}
|
q275667
|
classifier.sort_clusters
|
test
|
def sort_clusters(self, data, cs, sort_by):
"""
Sort clusters by the concentration of a particular analyte.
Parameters
----------
data : dict
A dataset containing sort_by as a key.
cs : array-like
An array of clusters, the same length as values of data.
sort_by : str
analyte to sort the clusters by
Returns
-------
array of clusters, sorted by mean value of sort_by analyte.
"""
# label the clusters according to their contents
sdat = data[sort_by]
|
python
|
{
"resource": ""
}
|
q275668
|
get_date
|
test
|
def get_date(datetime, time_format=None):
"""
Return a datetime object from a string, with optional time format.
Parameters
----------
datetime : str
Date-time as string in any sensible format.
time_format : datetime str (optional)
String describing the datetime format. If missing uses
dateutil.parser to
|
python
|
{
"resource": ""
}
|
q275669
|
get_total_n_points
|
test
|
def get_total_n_points(d):
"""
Returns the total number of data points in values of dict.
Parameters
----------
d
|
python
|
{
"resource": ""
}
|
q275670
|
get_total_time_span
|
test
|
def get_total_time_span(d):
"""
Returns total length of analysis.
"""
tmax = 0
for di in d.values():
|
python
|
{
"resource": ""
}
|
q275671
|
unitpicker
|
test
|
def unitpicker(a, llim=0.1, denominator=None, focus_stage=None):
"""
Determines the most appropriate plotting unit for data.
Parameters
----------
a : float or array-like
number to optimise. If array like, the 25% quantile is optimised.
llim : float
minimum allowable value in scaled data.
Returns
-------
(float, str)
(multiplier, unit)
"""
if not isinstance(a, (int, float)):
a = nominal_values(a)
a = np.percentile(a[~np.isnan(a)], 25)
if denominator is not None:
pd = pretty_element(denominator)
else:
pd = ''
if focus_stage == 'calibrated':
udict = {0: 'mol/mol ' + pd,
1: 'mmol/mol ' + pd,
2: '$\mu$mol/mol ' + pd,
3: 'nmol/mol ' + pd,
4: 'pmol/mol ' + pd,
5: 'fmol/mol ' + pd}
elif focus_stage == 'ratios':
udict = {0: 'counts/count ' + pd,
1: '$10^{-3}$ counts/count ' + pd,
|
python
|
{
"resource": ""
}
|
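The `udict` tables above are cut off. A minimal sketch of the selection loop the docstring implies: walk a multiplier/unit table until the scaled value clears `llim`. The loop and the abbreviated table are assumptions, not the source's full logic:

```python
def unitpicker_sketch(a, llim=0.1):
    units = [(1, 'mol/mol'), (1e3, 'mmol/mol'), (1e6, '$\\mu$mol/mol'),
             (1e9, 'nmol/mol'), (1e12, 'pmol/mol'), (1e15, 'fmol/mol')]
    for multiplier, unit in units:
        if a * multiplier > llim:
            return multiplier, unit
    return units[-1]

print(unitpicker_sketch(3.2e-5))  # (1000000.0, '$\\mu$mol/mol')
```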
q275672
|
pretty_element
|
test
|
def pretty_element(s):
"""
Returns formatted element name.
Parameters
----------
s : str
of format [A-Z][a-z]?[0-9]+
Returns
|
python
|
{
"resource": ""
}
|
q275673
|
analyte_2_namemass
|
test
|
def analyte_2_namemass(s):
"""
Converts analytes in format '27Al' to 'Al27'.
Parameters
----------
s : str
of format [0-9]{1,3}[A-z]{1,3}
Returns
-------
str
Name in format [A-z]{1,3}[0-9]{1,3}
"""
|
python
|
{
"resource": ""
}
|
q275674
|
analyte_2_massname
|
test
|
def analyte_2_massname(s):
"""
Converts analytes in format 'Al27' to '27Al'.
Parameters
----------
s : str
of format [A-z]{1,3}[0-9]{1,3}
Returns
-------
str
Name in format [0-9]{1,3}[A-z]{1,3}
"""
|
python
|
{
"resource": ""
}
|
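Both conversion bodies are omitted above. A regex sketch of the pair, matching the corrected format strings in the docstrings; the `_sketch` names are assumed:

```python
import re

def analyte_2_namemass_sketch(s):
    # '27Al' -> 'Al27': move the leading mass after the element name.
    m = re.match(r'([0-9]{1,3})([A-Za-z]{1,3})', s)
    return m.group(2) + m.group(1)

def analyte_2_massname_sketch(s):
    # 'Al27' -> '27Al': the inverse swap.
    m = re.match(r'([A-Za-z]{1,3})([0-9]{1,3})', s)
    return m.group(2) + m.group(1)

assert analyte_2_namemass_sketch('27Al') == 'Al27'
assert analyte_2_massname_sketch('Al27') == '27Al'
```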
q275675
|
collate_data
|
test
|
def collate_data(in_dir, extension='.csv', out_dir=None):
"""
Copy all csvs in a nested directory to a single directory.
Function to copy all csvs from a directory, and place
them in a new directory.
Parameters
----------
in_dir : str
Input directory containing csv files in subfolders
extension : str
The extension that identifies your data files.
Defaults to '.csv'.
out_dir : str
Destination directory
Returns
-------
None
"""
if out_dir is
|
python
|
{
"resource": ""
}
|
q275676
|
enumerate_bool
|
test
|
def enumerate_bool(bool_array, nstart=0):
"""
Consecutively numbers contiguous booleans in array.
i.e. a boolean sequence, and resulting numbering
T F T T T F T F F F T T F
0 - 1 1 1 - 2 - - - 3 3 -
where ' - ' marks False values.
Parameters
----------
bool_array : array_like
Array of booleans.
nstart : int
|
python
|
{
"resource": ""
}
|
q275677
|
tuples_2_bool
|
test
|
def tuples_2_bool(tuples, x):
"""
Generate boolean array from list of limit tuples.
Parameters
----------
tuples : array_like
[2, n] array of (start, end) values
x : array_like
x scale the tuples are mapped to
Returns
-------
array_like
boolean array, True
|
python
|
{
"resource": ""
}
|
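A short sketch of the tuple-to-boolean mapping described above, assuming inclusive limits:

```python
import numpy as np

def tuples_2_bool_sketch(tuples, x):
    # True wherever x falls inside any (start, end) pair.
    out = np.zeros(len(x), dtype=bool)
    for start, end in tuples:
        out |= (x >= start) & (x <= end)
    return out

x = np.arange(10)
print(tuples_2_bool_sketch([(2, 4), (7, 8)], x))
# [False False  True  True  True False False  True  True False]
```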
q275678
|
fastsmooth
|
test
|
def fastsmooth(a, win=11):
"""
Returns rolling - window smooth of a.
Function to efficiently calculate the rolling mean of a numpy
array using 'stride_tricks' to split up a 1D array into an ndarray of
sub - sections of the original array, of dimensions [len(a) - win, win].
Parameters
----------
a : array_like
The 1D array to calculate the rolling gradient of.
win : int
The width of the rolling window.
Returns
-------
array_like
Gradient of a, assuming as constant integer x - scale.
"""
# check to see if
|
python
|
{
"resource": ""
}
|
q275679
|
fastgrad
|
test
|
def fastgrad(a, win=11):
"""
Returns rolling - window gradient of a.
Function to efficiently calculate the rolling gradient of a numpy
array using 'stride_tricks' to split up a 1D array into an ndarray of
sub - sections of the original array, of dimensions [len(a) - win, win].
Parameters
----------
a : array_like
The 1D array to calculate the rolling gradient of.
win : int
The width of the rolling window.
Returns
-------
array_like
Gradient of a, assuming as constant integer x - scale.
"""
# check to see if 'window' is odd (even does not work)
if win % 2 == 0:
|
python
|
{
"resource": ""
}
|
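Both functions above lean on the same 'stride_tricks' idea: view a 1D array as overlapping windows without copying. A sketch of that core step; the rolling statistics shown are illustrative stand-ins for the truncated bodies, not the source's exact estimators:

```python
import numpy as np

def rolling_window_sketch(a, win):
    # Zero-copy view of `a` as overlapping windows of width `win`.
    shape = (a.size - win + 1, win)
    strides = (a.strides[0], a.strides[0])
    return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)

a = np.arange(6, dtype=float)
windows = rolling_window_sketch(a, 3)
print(windows.mean(axis=1))                       # rolling mean, the fastsmooth idea
print(np.gradient(windows, axis=1).mean(axis=1))  # rolling gradient, cf. fastgrad
```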
q275680
|
findmins
|
test
|
def findmins(x, y):
""" Function to find local minima.
Parameters
----------
x, y : array_like
1D arrays of the independent (x) and
|
python
|
{
"resource": ""
}
|
q275681
|
cluster_meanshift
|
test
|
def cluster_meanshift(data, bandwidth=None, bin_seeding=False, **kwargs):
"""
Identify clusters using Meanshift algorithm.
Parameters
----------
data : array_like
array of size [n_samples, n_features].
bandwidth : float or None
If None, bandwidth is estimated automatically using
sklearn.cluster.estimate_bandwidth
|
python
|
{
"resource": ""
}
|
q275682
|
cluster_kmeans
|
test
|
def cluster_kmeans(data, n_clusters, **kwargs):
"""
Identify clusters using K-Means algorithm.
Parameters
----------
data : array_like
array of size [n_samples, n_features].
n_clusters : int
The number of clusters expected in the data.
Returns
-------
|
python
|
{
"resource": ""
}
|
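The body is truncated above; a plausible minimal version using scikit-learn, with the return values and `_sketch` name assumed:

```python
import numpy as np
from sklearn import cluster as cl

def cluster_kmeans_sketch(data, n_clusters, **kwargs):
    # Fit K-Means and return the fitted model and its labels.
    km = cl.KMeans(n_clusters=n_clusters, n_init=10, **kwargs).fit(data)
    return km, km.labels_

rng = np.random.default_rng(0)
data = np.vstack([rng.normal(0, .1, (20, 2)), rng.normal(1, .1, (20, 2))])
_, labels = cluster_kmeans_sketch(data, 2)
print(sorted(set(labels.tolist())))  # [0, 1]
```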
q275683
|
cluster_DBSCAN
|
test
|
def cluster_DBSCAN(data, eps=None, min_samples=None,
n_clusters=None, maxiter=200, **kwargs):
"""
Identify clusters using DBSCAN algorithm.
Parameters
----------
data : array_like
array of size [n_samples, n_features].
eps : float
The maximum 'distance' points can be apart for them to be in the
same cluster. Defaults to 0.3. Note: If the data are normalised
(they should be for DBSCAN) this is in terms of total sample
variance. Normalised data have a mean of 0 and a variance of 1.
min_samples : int
The minimum number of samples within distance `eps` required
to be considered as an independent cluster.
n_clusters : int
The number of clusters expected. If specified, `eps` will be
incrementally reduced until the expected number of clusters is
found.
maxiter : int
The maximum number of iterations DBSCAN will run.
Returns
-------
dict
boolean array for each identified cluster and core samples.
"""
if n_clusters is None:
if eps is None:
eps = 0.3
db = cl.DBSCAN(eps=eps, min_samples=min_samples, **kwargs).fit(data)
else:
clusters = 0
eps_temp = 1 / .95
niter = 0
while clusters < n_clusters:
clusters_last = clusters
eps_temp *= 0.95
db = cl.DBSCAN(eps=eps_temp, min_samples=min_samples, **kwargs).fit(data)
clusters = (len(set(db.labels_)) -
(1 if -1 in db.labels_ else 0))
if clusters < clusters_last:
eps_temp *= 1 / 0.95
db = cl.DBSCAN(eps=eps_temp, min_samples=min_samples, **kwargs).fit(data)
clusters = (len(set(db.labels_)) -
|
python
|
{
"resource": ""
}
|
q275684
|
get_defined_srms
|
test
|
def get_defined_srms(srm_file):
"""
Returns list of SRMS defined in
|
python
|
{
"resource": ""
}
|
q275685
|
read_configuration
|
test
|
def read_configuration(config='DEFAULT'):
"""
Read LAtools configuration file, and return parameters as dict.
"""
# read configuration file
_, conf = read_latoolscfg()
# if 'DEFAULT', check which is the default configuration
if config == 'DEFAULT':
config = conf['DEFAULT']['config']
|
python
|
{
"resource": ""
}
|
q275686
|
read_latoolscfg
|
test
|
def read_latoolscfg():
"""
Reads configuration, returns a ConfigParser object.
Distinct from read_configuration, which
|
python
|
{
"resource": ""
}
|
q275687
|
print_all
|
test
|
def print_all():
"""
Prints all currently defined configurations.
"""
# read configuration file
_, conf = read_latoolscfg()
default = conf['DEFAULT']['config']
pstr = '\nCurrently defined LAtools configurations:\n\n'
for s in conf.sections():
if s == default:
pstr += s + ' [DEFAULT]\n'
elif s == 'REPRODUCE':
pstr += s + ' [DO NOT ALTER]\n'
else:
pstr
|
python
|
{
"resource": ""
}
|
q275688
|
copy_SRM_file
|
test
|
def copy_SRM_file(destination=None, config='DEFAULT'):
"""
Creates a copy of the default SRM table at the specified location.
Parameters
----------
destination : str
The save location for the SRM file. If no location specified,
saves it as 'LAtools_[config]_SRMTable.csv' in the current working
directory.
config : str
It's possible to set up different configurations with different
SRM files. This specifies the name of the configuration that you
want to copy the SRM file from. If not specified, the 'DEFAULT'
configuration is used.
"""
# find SRM file from configuration
conf = read_configuration()
|
python
|
{
"resource": ""
}
|
q275689
|
create
|
test
|
def create(config_name, srmfile=None, dataformat=None, base_on='DEFAULT', make_default=False):
"""
Adds a new configuration to latools.cfg.
Parameters
----------
config_name : str
The name of the new configuration. This should be descriptive
(e.g. UC Davis Foram Group)
srmfile : str (optional)
The location of the srm file used for calibration.
dataformat : str (optional)
The location of the dataformat definition to use.
base_on : str
The name of the existing configuration to base the new one on.
If either srm_file or dataformat are not specified, the new
|
python
|
{
"resource": ""
}
|
q275690
|
change_default
|
test
|
def change_default(config):
"""
Change the default configuration.
"""
config_file, cf = read_latoolscfg()
if config not in cf.sections():
raise ValueError("\n'{:s}' is not a defined configuration.".format(config))
if config == 'REPRODUCE':
pstr = ('Are you SURE you want to set REPRODUCE as your default configuration?\n' +
' ... this is an odd thing to be doing.')
else:
pstr = ('Are you sure you want to change the default configuration from {:s}'.format(cf['DEFAULT']['config']) +
|
python
|
{
"resource": ""
}
|
q275691
|
exclude_downhole
|
test
|
def exclude_downhole(filt, threshold=2):
"""
Exclude all data after the first excluded portion.
This makes sense for spot measurements where, because
of the signal mixing inherent in LA-ICPMS, once a
contaminant is ablated, it will always be present to
some degree in signals from further down the ablation
pit.
Parameters
----------
filt : boolean array
threshold : int
Returns
-------
filter : boolean array
"""
cfilt = filt.copy()
|
python
|
{
"resource": ""
}
|
q275692
|
defrag
|
test
|
def defrag(filt, threshold=3, mode='include'):
"""
'Defragment' a filter.
Parameters
----------
filt : boolean array
A filter
threshold : int
Consecutive values equal to or below this threshold
length are considered fragments, and will be removed.
mode : str
Whether to change False fragments to True ('include')
or True fragments to False ('exclude')
Returns
-------
defragmented filter : boolean array
"""
if bool_2_indices(filt) is None:
return filt
if mode == 'include':
|
python
|
{
"resource": ""
}
|
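The `defrag` body stops at the mode branch. A self-contained sketch of the behaviour the docstring specifies, flipping runs at or below `threshold` in length; the run-scanning approach is an assumption:

```python
import numpy as np

def defrag_sketch(filt, threshold=3, mode='include'):
    # 'include': short False runs become True; 'exclude': short True runs become False.
    target = (mode == 'exclude')
    filt = np.asarray(filt, dtype=bool).copy()
    start = 0
    for i in range(1, len(filt) + 1):
        if i == len(filt) or filt[i] != filt[start]:
            if filt[start] == target and (i - start) <= threshold:
                filt[start:i] = not target
            start = i
    return filt

print(defrag_sketch([True, False, True, True], threshold=1, mode='include'))
# [ True  True  True  True]
```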
q275693
|
D.despike
|
test
|
def despike(self, expdecay_despiker=True, exponent=None,
noise_despiker=True, win=3, nlim=12., maxiter=3):
"""
Applies expdecay_despiker and noise_despiker to data.
Parameters
----------
expdecay_despiker : bool
Whether or not to apply the exponential decay filter.
exponent : None or float
The exponent for the exponential decay filter. If None,
it is determined automatically using `find_expocoef`.
noise_despiker : bool
Whether or not to apply the standard deviation spike filter.
win : int
The rolling window over which the spike filter calculates
the trace statistics.
nlim : float
The number of standard deviations above the rolling mean
|
python
|
{
"resource": ""
}
|
q275694
|
D.autorange_plot
|
test
|
def autorange_plot(self, analyte='total_counts', gwin=7, swin=None, win=20,
on_mult=[1.5, 1.], off_mult=[1., 1.5],
transform='log'):
"""
Plot a detailed autorange report for this sample.
"""
if analyte is None:
# sig = self.focus[self.internal_standard]
sig = self.data['total_counts']
elif analyte == 'total_counts':
sig = self.data['total_counts']
elif analyte in self.analytes:
sig = self.focus[analyte]
else:
|
python
|
{
"resource": ""
}
|
q275695
|
D.mkrngs
|
test
|
def mkrngs(self):
"""
Transform boolean arrays into list of limit pairs.
Gets Time limits of signal/background boolean arrays and stores them as
sigrng and bkgrng arrays. These arrays can be saved by 'save_ranges' in
the analyse object.
"""
bbool = bool_2_indices(self.bkg)
if bbool is not None:
self.bkgrng = self.Time[bbool]
else:
self.bkgrng = [[np.nan, np.nan]]
sbool = bool_2_indices(self.sig)
if sbool is not None:
self.sigrng = self.Time[sbool]
else:
self.sigrng = [[np.nan, np.nan]]
|
python
|
{
"resource": ""
}
|
q275696
|
D.ratio
|
test
|
def ratio(self, internal_standard=None):
"""
Divide all analytes by a specified internal_standard analyte.
Parameters
----------
internal_standard : str
The analyte used as the internal_standard.
Returns
-------
None
"""
if internal_standard is not None:
self.internal_standard = internal_standard
self.data['ratios'] = Bunch()
|
python
|
{
"resource": ""
}
|
q275697
|
D.calibrate
|
test
|
def calibrate(self, calib_ps, analytes=None):
"""
Apply calibration to data.
The `calib_dict` must be calculated at the `analyse` level,
and passed to this calibrate function.
Parameters
----------
calib_dict : dict
A dict of calibration values to apply to each analyte.
Returns
-------
None
"""
# can have calibration function stored in self and pass *coefs?
|
python
|
{
"resource": ""
}
|
q275698
|
D.sample_stats
|
test
|
def sample_stats(self, analytes=None, filt=True,
stat_fns={},
eachtrace=True):
"""
Calculate sample statistics
Returns samples, analytes, and arrays of statistics
of shape (samples, analytes). Statistics are calculated
from the 'focus' data variable, so output depends on how
the data have been processed.
Parameters
----------
analytes : array_like
List of analytes to calculate the statistic on
filt : bool or str
The filter to apply to the data when calculating sample statistics.
bool: True applies filter specified in filt.switches.
str: logical string specifying a particular filter
stat_fns : dict
Dict of {name: function} pairs. Functions that take a single
array_like input, and return a single statistic. Function should
be able to cope with NaN values.
eachtrace : bool
True: per-ablation statistics
False: whole sample statistics
Returns
-------
None
"""
if analytes is None:
|
python
|
{
"resource": ""
}
|
q275699
|
D.ablation_times
|
test
|
def ablation_times(self):
"""
Function for calculating the ablation time for each
ablation.
Returns
-------
dict of times for each ablation.
"""
ats
|
python
|
{
"resource": ""
}
|