Dataset Viewer
Split: bio_main
Column: text (string, lengths 0–828)
text |
---|
0,"def show_slug_with_level(context, page, lang=None, fallback=True):
|
""""""Display slug with level by language.""""""
|
if not lang:
|
lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE)
|
page = get_page_from_string_or_id(page, lang)
|
if not page:
|
return ''
|
return {'content': page.slug_with_level(lang)}"
|
1,"def show_revisions(context, page, content_type, lang=None):
|
""""""Render the last 10 revisions of a page content with a list using
|
the ``pages/revisions.html`` template""""""
|
if not pages_settings.PAGE_CONTENT_REVISION:
|
return {'revisions': None}
|
revisions = Content.objects.filter(page=page, language=lang,
|
type=content_type).order_by('-creation_date')
|
if len(revisions) < 2:
|
return {'revisions': None}
|
return {'revisions': revisions[0:10]}"
|
2,"def do_videoplaceholder(parser, token):
|
""""""
|
Method that parse the imageplaceholder template tag.
|
""""""
|
name, params = parse_placeholder(parser, token)
|
return VideoPlaceholderNode(name, **params)"
|
3,"def do_get_pages_with_tag(parser, token):
|
""""""
|
Return Pages with given tag
|
Syntax::
|
{% get_pages_with_tag <tag name> as <varname> %}
|
Example use:
|
{% get_pages_with_tag ""footer"" as pages %}
|
""""""
|
bits = token.split_contents()
|
if 4 != len(bits):
|
raise TemplateSyntaxError('%r expects 2 arguments' % bits[0])
|
if bits[-2] != 'as':
|
raise TemplateSyntaxError(
|
'%r expects ""as"" as the second last argument' % bits[0])
|
varname = bits[-1]
|
tag = parser.compile_filter(bits[1])
|
varname = bits[-1]
|
return GetPagesWithTagNode(tag, varname)"
|
4,"def parserunstats(self):
|
""""""Parses the XML run statistics file (GenerateFASTQRunStatistics.xml). In some cases, the file is not
|
available. Equivalent data can be pulled from Basespace.Generate a text file name indexingQC.txt containing
|
the copied tables from the Indexing QC tab of the run on Basespace""""""
|
# metadata = GenObject()
|
# If the default file GenerateFASTQRunStatistics.xml is present, parse it
|
if os.path.isfile(os.path.join(self.path, ""GenerateFASTQRunStatistics.xml"")):
|
# Create a list of keys for which values are to be extracted
|
datalist = [""SampleNumber"", ""SampleID"", ""SampleName"", ""NumberOfClustersPF""]
|
# Load the file as an xml ElementTree object
|
runstatistics = ElementTree.ElementTree(file=os.path.join(self.path, ""GenerateFASTQRunStatistics.xml""))
|
# Iterate through all the elements in the object
|
# .iterfind() allow for the matching and iterating though matches
|
# This is stored as a float to allow subsequent calculations
|
tclusterspf = [float(element.text) for element in runstatistics.iterfind(""RunStats/NumberOfClustersPF"")][0]
|
# Iterate through all the elements (strains) in the OverallSamples/SummarizedSampleStatistics category
|
for element in runstatistics.iterfind(""OverallSamples/SummarizedSampleStatistics""):
|
# List comprehension. Essentially iterate through each element for each category in datalist:
|
# (element.iter(category) and pull out the value for nestedelement
|
straindata = [nestedelement.text for category in datalist for nestedelement in element.iter(category)]
|
# Try and replicate the Illumina rules to create file names from ""Sample_Name""
|
samplename = samplenamer(straindata, 1)
|
# Calculate the percentage of clusters associated with each strain
|
# noinspection PyTypeChecker
|
percentperstrain = ""{:.2f}"".format((float(straindata[3]) / tclusterspf * 100))
|
try:
|
# Use the sample number -1 as the index in the list of objects created in parsesamplesheet
|
strainindex = int(straindata[0]) - 1
|
# Set run to the .run object of self.samples[index]
|
run = self.samples[strainindex].run
|
# An assertion that compares the sample computer above to the previously entered sample name
|
# to ensure that the samples are the same
|
assert self.samples[strainindex].name == samplename, \
|
""Sample name does not match object name {0!r:s}"".format(straindata[1])
|
# Add the appropriate values to the strain metadata object
|
run.SampleNumber = straindata[0]
|
run.NumberofClustersPF = straindata[3]
|
run.TotalClustersinRun = tclusterspf
|
run.PercentOfClusters = percentperstrain
|
run.flowcell = self.flowcell
|
run.instrument = self.instrument
|
except IndexError:
|
pass
|
elif os.path.isfile(os.path.join(self.path, 'indexingQC.txt')):
|
# Grab the first element from the second line in the file
|
tclusterspf = float(getline(os.path.join(self.path, ""indexingQC.txt""), 2).split(""\t"")[0])
|
# Open the file and extract the relevant data
|
with open(os.path.join(""indexingQC.txt"")) as indexqc:
|
# Iterate through the file
|
for line in indexqc:
|
# Once ""Index"" is encountered, iterate through the rest of the file
|
if ""Index"" in line:
|
End of preview. Expand in Data Studio.
README.md exists but content is empty.
- Downloads last month: 13