id (string, 1-265 chars) | text (string, 6-5.19M chars) | dataset_id (7 classes)
---|---|---|
/Indomielibs-2.0.106.tar.gz/Indomielibs-2.0.106/pyrogram/types/messages_and_media/thumbnail.py |
from typing import List, Optional, Union
import pyrogram
from pyrogram import raw
from pyrogram.file_id import FileId, FileType, FileUniqueId, FileUniqueType, ThumbnailSource
from ..object import Object
class Thumbnail(Object):
"""One size of a photo or a file/sticker thumbnail.
Parameters:
file_id (``str``):
Identifier for this file, which can be used to download or reuse the file.
file_unique_id (``str``):
Unique identifier for this file, which is supposed to be the same over time and for different accounts.
Can't be used to download or reuse the file.
width (``int``):
Photo width.
height (``int``):
Photo height.
file_size (``int``):
File size.
"""
def __init__(
self,
*,
client: "pyrogram.Client" = None,
file_id: str,
file_unique_id: str,
width: int,
height: int,
file_size: int
):
super().__init__(client)
self.file_id = file_id
self.file_unique_id = file_unique_id
self.width = width
self.height = height
self.file_size = file_size
@staticmethod
def _parse(client, media: Union["raw.types.Photo", "raw.types.Document"]) -> Optional[List["Thumbnail"]]:
if isinstance(media, raw.types.Photo):
raw_thumbs = [i for i in media.sizes if isinstance(i, raw.types.PhotoSize)]
raw_thumbs.sort(key=lambda p: p.size)
raw_thumbs = raw_thumbs[:-1]
file_type = FileType.PHOTO
elif isinstance(media, raw.types.Document):
raw_thumbs = media.thumbs
file_type = FileType.THUMBNAIL
else:
return
parsed_thumbs = []
for thumb in raw_thumbs:
if not isinstance(thumb, raw.types.PhotoSize):
continue
parsed_thumbs.append(
Thumbnail(
file_id=FileId(
file_type=file_type,
dc_id=media.dc_id,
media_id=media.id,
access_hash=media.access_hash,
file_reference=media.file_reference,
thumbnail_file_type=file_type,
thumbnail_source=ThumbnailSource.THUMBNAIL,
thumbnail_size=thumb.type,
volume_id=0,
local_id=0
).encode(),
file_unique_id=FileUniqueId(
file_unique_type=FileUniqueType.DOCUMENT,
media_id=media.id
).encode(),
width=thumb.w,
height=thumb.h,
file_size=thumb.size,
client=client
)
)
return parsed_thumbs or None | PypiClean |
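A minimal usage sketch for the class above, assuming the standard Pyrogram-style client API that this fork exposes (`message.photo.thumbs` holding the parsed `Thumbnail` objects and `client.download_media()` accepting a `file_id` are assumptions carried over from upstream Pyrogram):

    # Illustrative only: download the smallest thumbnail of a received photo.
    async def on_photo(client, message):
        thumbs = message.photo.thumbs or []
        if thumbs:
            smallest = min(thumbs, key=lambda t: t.file_size)
            path = await client.download_media(smallest.file_id)
            print(f"{smallest.width}x{smallest.height} thumbnail saved to {path}")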
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/lib/scons-4.4.0/SCons/Tool/dvips.py |
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Action
import SCons.Builder
import SCons.Tool.dvipdf
import SCons.Tool.tex
import SCons.Util
def DviPsFunction(target=None, source=None, env=None):
    result = SCons.Tool.dvipdf.DviPdfPsFunction(PSAction, target, source, env)
    return result
def DviPsStrFunction(target=None, source=None, env=None):
    """A strfunction for dvips that returns the appropriate
    command string for the no_exec options."""
if env.GetOption("no_exec"):
result = env.subst('$PSCOM',0,target,source)
else:
result = ''
return result
PSAction = None
DVIPSAction = None
PSBuilder = None
def generate(env):
"""Add Builders and construction variables for dvips to an Environment."""
global PSAction
if PSAction is None:
PSAction = SCons.Action.Action('$PSCOM', '$PSCOMSTR')
global DVIPSAction
if DVIPSAction is None:
DVIPSAction = SCons.Action.Action(DviPsFunction, strfunction = DviPsStrFunction)
global PSBuilder
if PSBuilder is None:
PSBuilder = SCons.Builder.Builder(action = PSAction,
prefix = '$PSPREFIX',
suffix = '$PSSUFFIX',
src_suffix = '.dvi',
src_builder = 'DVI',
single_source=True)
env['BUILDERS']['PostScript'] = PSBuilder
env['DVIPS'] = 'dvips'
env['DVIPSFLAGS'] = SCons.Util.CLVar('')
# I'm not quite sure I got the directories and filenames right for variant_dir
# We need to be in the correct directory for the sake of latex \includegraphics eps included files.
env['PSCOM'] = 'cd ${TARGET.dir} && $DVIPS $DVIPSFLAGS -o ${TARGET.file} ${SOURCE.file}'
env['PSPREFIX'] = ''
env['PSSUFFIX'] = '.ps'
def exists(env):
SCons.Tool.tex.generate_darwin(env)
return env.Detect('dvips')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
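A minimal usage sketch for the tool above (an assumed `SConstruct`, not part of the package; it presumes a TeX distribution with `dvips` on the PATH so that the default tools and this module load):

    # SConstruct -- illustrative sketch only
    env = Environment()                    # default tools pick up tex/latex and dvips
    dvi = env.DVI('paper.tex')             # paper.tex -> paper.dvi
    ps = env.PostScript('paper.ps', dvi)   # paper.dvi -> paper.ps via $PSCOM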
/Cohen-0.7.4.tar.gz/Cohen-0.7.4/coherence/upnp/devices/wan_device_client.py |
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2010 Frank Scholz <[email protected]>
from coherence.upnp.devices.wan_connection_device_client import WANConnectionDeviceClient
from coherence.upnp.services.clients.wan_common_interface_config_client import WANCommonInterfaceConfigClient
from coherence import log
import coherence.extern.louie as louie
class WANDeviceClient(log.Loggable):
logCategory = 'wan_device_client'
def __init__(self, device):
log.Loggable.__init__(self)
self.device = device
self.device_type = self.device.get_friendly_device_type()
self.version = int(self.device.get_device_type_version())
self.icons = device.icons
self.wan_connection_device = None
self.wan_common_interface_connection = None
self.embedded_device_detection_completed = False
self.service_detection_completed = False
louie.connect(self.embedded_device_notified, signal='Coherence.UPnP.EmbeddedDeviceClient.detection_completed', sender=self.device)
try:
wan_connection_device = self.device.get_embedded_device_by_type('WANConnectionDevice')[0]
self.wan_connection_device = WANConnectionDeviceClient(wan_connection_device)
        except Exception:
            self.warning("Embedded WANConnectionDevice not available; the device does not implement the UPnP specification properly")
            raise
louie.connect(self.service_notified, signal='Coherence.UPnP.DeviceClient.Service.notified', sender=self.device)
for service in self.device.get_services():
if service.get_type() in ["urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1"]:
self.wan_common_interface_connection = WANCommonInterfaceConfigClient(service)
self.info("WANDevice %s", self.device.get_friendly_name())
def remove(self):
self.info("removal of WANDeviceClient started")
        if self.wan_common_interface_connection is not None:
            self.wan_common_interface_connection.remove()
        if self.wan_connection_device is not None:
            self.wan_connection_device.remove()
def embedded_device_notified(self, device):
self.info("EmbeddedDevice %r sent notification", device)
        if self.embedded_device_detection_completed:
            return
        self.embedded_device_detection_completed = True
        if self.embedded_device_detection_completed and self.service_detection_completed:
louie.send('Coherence.UPnP.EmbeddedDeviceClient.detection_completed', None,
self)
def service_notified(self, service):
self.info("Service %r sent notification", service)
        if self.service_detection_completed:
            return
        if self.wan_common_interface_connection is not None:
            if not hasattr(self.wan_common_interface_connection.service, 'last_time_updated'):
                return
            if self.wan_common_interface_connection.service.last_time_updated is None:
                return
        self.service_detection_completed = True
        if self.embedded_device_detection_completed and self.service_detection_completed:
louie.send('Coherence.UPnP.EmbeddedDeviceClient.detection_completed', None,
self) | PypiClean |
/GenomeTreeTk-0.1.6.tar.gz/GenomeTreeTk-0.1.6/genometreetk/main.py |
import csv
import logging
import sys
import dendropy
from biolib.common import check_file_exists, make_sure_path_exists
from biolib.external.execute import check_dependencies
from biolib.newick import parse_label
from biolib.taxonomy import Taxonomy
from genometreetk.arb import Arb
from genometreetk.bootstrap import Bootstrap
from genometreetk.combine_support import CombineSupport
from genometreetk.common import read_gtdb_metadata
from genometreetk.derep_tree import DereplicateTree
from genometreetk.jackknife_markers import JackknifeMarkers
from genometreetk.jackknife_taxa import JackknifeTaxa
from genometreetk.phylogenetic_diversity import PhylogeneticDiversity
from genometreetk.prune import Prune
from genometreetk.reroot_tree import RerootTree
from genometreetk.rna_workflow import RNA_Workflow
csv.field_size_limit(sys.maxsize)
class OptionsParser():
def __init__(self):
"""Initialization"""
self.logger = logging.getLogger()
def ssu_tree(self, options):
"""Infer 16S tree spanning GTDB genomes."""
check_dependencies(['mothur', 'ssu-align', 'ssu-mask', 'FastTreeMP', 'blastn'])
check_file_exists(options.gtdb_metadata_file)
check_file_exists(options.gtdb_ssu_file)
make_sure_path_exists(options.output_dir)
rna_workflow = RNA_Workflow(options.cpus)
rna_workflow.run('ssu',
options.gtdb_metadata_file,
options.gtdb_ssu_file,
options.min_ssu_length,
options.min_scaffold_length,
options.min_quality,
options.max_contigs,
options.min_N50,
not options.disable_tax_filter,
options.genome_list,
options.output_dir,
options.align_method)
self.logger.info('Results written to: %s' % options.output_dir)
def lsu_tree(self, options):
"""Infer 23S tree spanning GTDB genomes."""
check_dependencies(['esl-sfetch', 'cmsearch', 'cmalign', 'esl-alimask', 'FastTreeMP', 'blastn'])
check_file_exists(options.gtdb_metadata_file)
check_file_exists(options.gtdb_lsu_file)
make_sure_path_exists(options.output_dir)
rna_workflow = RNA_Workflow(options.cpus)
rna_workflow.run('lsu',
options.gtdb_metadata_file,
options.gtdb_lsu_file,
options.min_lsu_length,
options.min_scaffold_length,
options.min_quality,
options.max_contigs,
options.min_N50,
not options.disable_tax_filter,
#options.reps_only,
#options.user_genomes,
options.genome_list,
options.output_dir)
self.logger.info('Results written to: %s' % options.output_dir)
def rna_tree(self, options):
"""Infer 16S + 23S tree spanning GTDB genomes."""
check_dependencies(['FastTreeMP'])
check_file_exists(options.ssu_msa)
check_file_exists(options.ssu_tree)
check_file_exists(options.lsu_msa)
check_file_exists(options.lsu_tree)
make_sure_path_exists(options.output_dir)
rna_workflow = RNA_Workflow(options.cpus)
rna_workflow.combine(options.ssu_msa,
options.ssu_tree,
options.lsu_msa,
options.lsu_tree,
options.output_dir)
self.logger.info('Results written to: %s' % options.output_dir)
def rna_dump(self, options):
"""Dump all 5S, 16S, and 23S sequences to files."""
check_file_exists(options.genomic_file)
make_sure_path_exists(options.output_dir)
rna_workflow = RNA_Workflow(1)
rna_workflow.dump(options.genomic_file,
options.gtdb_taxonomy,
options.min_5S_len,
options.min_16S_ar_len,
options.min_16S_bac_len,
options.min_23S_len,
options.min_contig_len,
options.include_user,
options.genome_list,
options.output_dir)
self.logger.info('Results written to: %s' % options.output_dir)
def derep_tree(self, options):
"""Dereplicate tree."""
check_file_exists(options.input_tree)
check_file_exists(options.gtdb_metadata)
check_file_exists(options.msa_file)
make_sure_path_exists(options.output_dir)
derep_tree = DereplicateTree()
derep_tree.run(options.input_tree,
options.lineage_of_interest,
options.outgroup,
options.gtdb_metadata,
options.taxa_to_retain,
options.msa_file,
options.keep_unclassified,
options.output_dir)
def bootstrap(self, options):
"""Bootstrap multiple sequence alignment."""
check_file_exists(options.input_tree)
if options.msa_file != 'NONE':
check_file_exists(options.msa_file)
make_sure_path_exists(options.output_dir)
bootstrap = Bootstrap(options.cpus)
output_tree = bootstrap.run(options.input_tree,
options.msa_file,
options.num_replicates,
options.model,
options.gamma,
options.base_type,
options.fraction,
options.boot_dir,
options.output_dir)
self.logger.info('Bootstrapped tree written to: %s' % output_tree)
def jk_markers(self, options):
"""Jackknife marker genes."""
check_file_exists(options.input_tree)
if options.msa_file != 'NONE':
check_file_exists(options.msa_file)
make_sure_path_exists(options.output_dir)
jackknife_markers = JackknifeMarkers(options.cpus)
output_tree = jackknife_markers.run(options.input_tree,
options.msa_file,
options.marker_info_file,
options.mask_file,
options.perc_markers,
options.num_replicates,
options.model,
options.jk_dir,
options.output_dir)
self.logger.info('Jackknifed marker tree written to: %s' % output_tree)
def jk_taxa(self, options):
"""Jackknife taxa."""
check_file_exists(options.input_tree)
check_file_exists(options.msa_file)
make_sure_path_exists(options.output_dir)
jackknife_taxa = JackknifeTaxa(options.cpus)
output_tree = jackknife_taxa.run(options.input_tree,
options.msa_file,
options.outgroup_ids,
options.perc_taxa,
options.num_replicates,
options.model,
options.output_dir)
self.logger.info('Jackknifed taxa tree written to: %s' % output_tree)
def combine(self, options):
"""Combine support values into a single tree."""
combineSupport = CombineSupport()
combineSupport.run(options.support_type,
options.bootstrap_tree,
options.jk_marker_tree,
options.jk_taxa_tree,
options.output_tree)
def support_wf(self, options):
""""Perform entire tree support workflow."""
self.bootstrap(options)
self.jk_markers(options)
self.jk_taxa(options)
self.combine(options)
def midpoint(self, options):
""""Midpoint root tree."""
reroot = RerootTree()
reroot.midpoint(options.input_tree, options.output_tree)
def outgroup(self, options):
"""Reroot tree with outgroup."""
check_file_exists(options.taxonomy_file)
self.logger.info('Identifying genomes from the specified outgroup.')
outgroup = set()
for genome_id, taxa in Taxonomy().read(options.taxonomy_file).items():
if options.outgroup_taxon in taxa:
outgroup.add(genome_id)
        self.logger.info('Identified %d genomes in the outgroup.' % len(outgroup))
reroot = RerootTree()
reroot.root_with_outgroup(options.input_tree, options.output_tree, outgroup)
def fill_ranks(self, options):
"""Ensure taxonomy strings contain all 7 canonical ranks."""
check_file_exists(options.input_taxonomy)
fout = open(options.output_taxonomy, 'w')
taxonomy = Taxonomy()
t = taxonomy.read(options.input_taxonomy)
for genome_id, taxon_list in t.items():
full_taxon_list = taxonomy.fill_missing_ranks(taxon_list)
taxonomy_str = ';'.join(full_taxon_list)
if not taxonomy.check_full(taxonomy_str):
sys.exit(-1)
fout.write('%s\t%s\n' % (genome_id, taxonomy_str))
fout.close()
self.logger.info('Revised taxonomy written to: %s' % options.output_taxonomy)
def propagate(self, options):
"""Propagate labels to all genomes in a cluster."""
check_file_exists(options.input_taxonomy)
check_file_exists(options.metadata_file)
# get representative genome information
rep_metadata = read_gtdb_metadata(options.metadata_file, ['gtdb_representative',
'gtdb_clustered_genomes'])
taxonomy = Taxonomy()
explict_tax = taxonomy.read(options.input_taxonomy)
expanded_taxonomy = {}
incongruent_count = 0
for genome_id, taxon_list in explict_tax.items():
taxonomy_str = ';'.join(taxon_list)
            # Propagate taxonomy strings if the genome is a representative. Also, determine
            # if genomes clustered together have compatible taxonomies. Note that a genome
            # may lack metadata, since a user may have removed a genome that is still
            # in the provided taxonomy file.
_rep_genome, clustered_genomes = rep_metadata.get(genome_id, (None, None))
if clustered_genomes: # genome is a representative
clustered_genome_ids = clustered_genomes.split(';')
# get taxonomy of all genomes in cluster with a specified taxonomy
clustered_genome_tax = {}
for cluster_genome_id in clustered_genome_ids:
if cluster_genome_id == genome_id:
continue
if cluster_genome_id not in rep_metadata:
continue # genome is no longer in the GTDB so ignore it
if cluster_genome_id in explict_tax:
clustered_genome_tax[cluster_genome_id] = explict_tax[cluster_genome_id]
# determine if representative and clustered genome taxonomy strings are congruent
working_cluster_taxonomy = list(taxon_list)
incongruent_with_rep = False
for cluster_genome_id, cluster_tax in clustered_genome_tax.items():
if incongruent_with_rep:
working_cluster_taxonomy = list(taxon_list) # default to rep taxonomy
break
for r in range(0, len(Taxonomy.rank_prefixes)):
if cluster_tax[r] == Taxonomy.rank_prefixes[r]:
break # no more taxonomy information to consider
if cluster_tax[r] != taxon_list[r]:
if taxon_list[r] == Taxonomy.rank_prefixes[r]:
                                # clustered genome has a more specific taxonomy string which
                                # should be propagated to the representative if all clustered
                                # genomes are in agreement
if working_cluster_taxonomy[r] == Taxonomy.rank_prefixes[r]:
# make taxonomy more specific based on genomes in cluster
working_cluster_taxonomy[r] = cluster_tax[r]
elif working_cluster_taxonomy[r] != cluster_tax[r]:
# not all genomes agree on the assignment of this rank so leave it unspecified
working_cluster_taxonomy[r] = Taxonomy.rank_prefixes[r]
break
else:
# genomes in cluster have incongruent taxonomies so defer to representative
self.logger.warning("Genomes in cluster have incongruent taxonomies.")
self.logger.warning("Representative %s: %s" % (genome_id, taxonomy_str))
self.logger.warning("Clustered genome %s: %s" % (cluster_genome_id, ';'.join(cluster_tax)))
self.logger.warning("Deferring to taxonomy specified for representative.")
incongruent_count += 1
incongruent_with_rep = True
break
cluster_taxonomy_str = ';'.join(working_cluster_taxonomy)
# assign taxonomy to representative and all genomes in the cluster
expanded_taxonomy[genome_id] = cluster_taxonomy_str
for cluster_genome_id in clustered_genome_ids:
expanded_taxonomy[cluster_genome_id] = cluster_taxonomy_str
else:
if genome_id in expanded_taxonomy:
# genome has already been assigned a taxonomy based on its representative
pass
else:
# genome is a singleton
expanded_taxonomy[genome_id] = taxonomy_str
self.logger.info('Identified %d clusters with incongruent taxonomies.' % incongruent_count)
fout = open(options.output_taxonomy, 'w')
for genome_id, taxonomy_str in expanded_taxonomy.items():
fout.write('%s\t%s\n' % (genome_id, taxonomy_str))
fout.close()
self.logger.info('Taxonomy written to: %s' % options.output_taxonomy)
def strip(self, options):
"""Remove taxonomic labels from tree."""
check_file_exists(options.input_tree)
outgroup_in_tree = set()
tree = dendropy.Tree.get_from_path(options.input_tree,
schema='newick',
rooting='force-rooted',
preserve_underscores=True)
for node in tree.internal_nodes():
if node.label:
if ':' in node.label:
support, _taxa = node.label.split(':')
node.label = support
else:
try:
                        # if the label parses as a float (or int),
                        # treat it as a support value and keep it
                        float(node.label)
except ValueError:
node.label = None
tree.write_to_path(options.output_tree,
schema='newick',
suppress_rooting=True,
unquoted_underscores=True)
self.logger.info('Stripped tree written to: %s' % options.output_tree)
def rm_support(self, options):
"""Remove support values from tree."""
check_file_exists(options.input_tree)
outgroup_in_tree = set()
tree = dendropy.Tree.get_from_path(options.input_tree,
schema='newick',
rooting='force-rooted',
preserve_underscores=True)
for node in tree.internal_nodes():
if node.label:
if ':' in node.label:
support, taxa = node.label.split(':')
node.label = taxa
else:
try:
                        # if the label parses as a float (or int), treat
                        # it as a support value and remove it
                        float(node.label)
                        node.label = None
except ValueError:
pass # keep other labels
tree.write_to_path(options.output_tree,
schema='newick',
suppress_rooting=True,
unquoted_underscores=True)
self.logger.info('Stripped tree written to: %s' % options.output_tree)
def pull(self, options):
"""Create taxonomy file from a decorated tree."""
check_file_exists(options.input_tree)
if options.no_validation:
tree = dendropy.Tree.get_from_path(options.input_tree,
schema='newick',
rooting="force-rooted",
preserve_underscores=True)
taxonomy = {}
for leaf in tree.leaf_node_iter():
taxon_id = leaf.taxon.label
node = leaf.parent_node
taxa = []
while node:
support, taxon, aux_info = parse_label(node.label)
if taxon:
                        for t in reversed([x.strip() for x in taxon.split(';')]):
taxa.append(t)
node = node.parent_node
taxonomy[taxon_id] = taxa[::-1]
else:
taxonomy = Taxonomy().read_from_tree(options.input_tree)
Taxonomy().write(taxonomy, options.output_taxonomy)
        self.logger.info('Taxonomy written to: %s' % options.output_taxonomy)
def append(self, options):
"""Append command"""
check_file_exists(options.input_tree)
check_file_exists(options.input_taxonomy)
taxonomy = Taxonomy().read(options.input_taxonomy)
tree = dendropy.Tree.get_from_path(options.input_tree,
schema='newick',
rooting='force-rooted',
preserve_underscores=True)
for n in tree.leaf_node_iter():
taxa_str = taxonomy.get(n.taxon.label, None)
            if taxa_str is None:
                self.logger.error('Taxonomy file does not contain an entry for %s.' % n.taxon.label)
sys.exit(-1)
n.taxon.label = n.taxon.label + '|' + '; '.join(taxonomy[n.taxon.label])
tree.write_to_path(options.output_tree,
schema='newick',
suppress_rooting=True,
unquoted_underscores=True)
self.logger.info('Decorated tree written to: %s' % options.output_tree)
def prune(self, options):
"""Prune tree."""
check_file_exists(options.input_tree)
check_file_exists(options.taxa_to_retain)
prune = Prune()
prune.run(options.input_tree,
options.taxa_to_retain,
options.output_tree)
def phylogenetic_diversity(self, options):
"""Calculate phylogenetic diversity of extant taxa."""
check_file_exists(options.tree)
check_file_exists(options.taxa_list)
pd = PhylogeneticDiversity()
rtn = pd.pd(options.tree, options.taxa_list, options.per_taxa_pg_file)
total_pd, num_in_taxa, in_pd, num_out_taxa, out_pd = rtn
total_taxa = num_in_taxa + num_out_taxa
in_pg = total_pd - out_pd
# report phylogenetic diversity (PD) and gain (PG)
print('')
print('\tNo. Taxa\tPD\tPercent PD')
print('%s\t%d\t%.2f\t%.2f%%' % ('Full tree', total_taxa, total_pd, 100))
print('%s\t%d\t%.2f\t%.3f%%' % ('Outgroup taxa (PD)',
num_out_taxa,
out_pd,
out_pd * 100 / total_pd))
print('%s\t%d\t%.2f\t%.3f%%' % ('Ingroup taxa (PD)',
num_in_taxa,
in_pd,
(in_pd) * 100 / total_pd))
print('%s\t%d\t%.2f\t%.3f%%' % ('Ingroup taxa (PG)',
num_in_taxa,
in_pg,
in_pg * 100 / total_pd))
def phylogenetic_diversity_clade(self, options):
"""Calculate phylogenetic diversity of named groups."""
check_file_exists(options.decorated_tree)
pd = PhylogeneticDiversity()
pd.pd_clade(options.decorated_tree,
options.taxa_list,
options.output_file)
def arb_records(self, options):
"""Create an ARB records file from GTDB metadata."""
check_file_exists(options.metadata_file)
arb = Arb()
arb.create_records(options.metadata_file,
options.msa_file,
options.taxonomy_file,
options.genome_list,
options.output_file)
def parse_options(self, options):
"""Parse user options and call the correct pipeline(s)"""
logging.basicConfig(format='', level=logging.INFO)
check_dependencies(('FastTree', 'hmmsearch'))
if options.subparser_name == 'ssu_tree':
self.ssu_tree(options)
elif options.subparser_name == 'lsu_tree':
self.lsu_tree(options)
elif options.subparser_name == 'rna_tree':
self.rna_tree(options)
elif options.subparser_name == 'rna_dump':
self.rna_dump(options)
elif options.subparser_name == 'derep_tree':
self.derep_tree(options)
elif options.subparser_name == 'bootstrap':
self.bootstrap(options)
elif options.subparser_name == 'jk_markers':
self.jk_markers(options)
elif options.subparser_name == 'jk_taxa':
self.jk_taxa(options)
elif options.subparser_name == 'combine':
self.combine(options)
elif options.subparser_name == 'midpoint':
self.midpoint(options)
elif options.subparser_name == 'outgroup':
self.outgroup(options)
elif options.subparser_name == 'propagate':
self.propagate(options)
elif options.subparser_name == 'fill_ranks':
self.fill_ranks(options)
elif options.subparser_name == 'strip':
self.strip(options)
elif options.subparser_name == 'rm_support':
self.rm_support(options)
elif options.subparser_name == 'pull':
self.pull(options)
elif options.subparser_name == 'append':
self.append(options)
elif options.subparser_name == 'prune':
self.prune(options)
elif options.subparser_name == 'pd':
self.phylogenetic_diversity(options)
elif options.subparser_name == 'pd_clade':
self.phylogenetic_diversity_clade(options)
elif options.subparser_name == 'arb_records':
self.arb_records(options)
else:
self.logger.error('Unknown GenomeTreeTk command: ' + options.subparser_name + '\n')
sys.exit(-1)
return 0 | PypiClean |
/EclipsingBinaries-4.0.2-py3-none-any.whl/docs/pipeline.rst | Pipeline
========
Usage
-----
The reason to use the pipeline is for automatic data reduction, finding comparison stars, multi-aperture photometry, etc.
To run the pipeline, simply run::
EB_pipeline -h
This will output the options available to a user for inputs that are allowed or required.
Inputs
------
There are two required user inputs. The first is the ``input folder``: the folder path where the images being taken by a telescope are saved. The second is the ``output folder``: where the user wants the newly reduced images and created files to go.
The next few inputs all have default values and do not need to be changed by the user.
+ ``--time`` How long the program should wait, after no new files have entered the folder, before starting the data reduction. The default value is 3600 seconds (i.e. 1 hour).
+ ``--loc`` Location where the images are being taken. At this point in time, the only allowed locations are BSUO or any site in `this <https://github.com/astropy/astropy-data/blob/gh-pages/coordinates/sites.json>`_ Astropy list.
+ ``--ra`` and ``--dec`` The right ascension and declination of the target system, respectively. The default values are both ``00:00:00``, so we recommend setting these values.
+ ``--name`` The name of the object the user is observing. The default value is simply ``target``.
Example
-------
An example script setup for the pipeline would be like the following:
``EB_pipeline C:/folder1/folder2/raw_images C:/folder1/folder2/reduced_images --time 3000 --loc CTIO --ra 00:28:27.96 --dec 78:57:42.65 --name NSVS_254037``
If the declination of the object is negative then that becomes ``--dec -78:57:42.65``.
Notice that the first two values, the ``input folder`` and the ``output folder``, do not have a ``--[name]`` prefix. Their order is also extremely important: the raw images folder comes first and the reduced images folder second.
| PypiClean |
/Firefly%20III%20API%20Python%20Client-1.5.6.post2.tar.gz/Firefly III API Python Client-1.5.6.post2/firefly_iii_client/model/category_update.py | import re # noqa: F401
import sys # noqa: F401
from firefly_iii_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from firefly_iii_client.exceptions import ApiAttributeError
class CategoryUpdate(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'name': (str,), # noqa: E501
'notes': (str, none_type,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'name': 'name', # noqa: E501
'notes': 'notes', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""CategoryUpdate - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
name (str): [optional] # noqa: E501
notes (str, none_type): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""CategoryUpdate - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
name (str): [optional] # noqa: E501
notes (str, none_type): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | PypiClean |
/Bussator-0.2.tar.gz/Bussator-0.2/README.md | # Bussator
[](https://gitlab.com/mardy/bussator/commits/master)
[](https://gitlab.com/mardy/bussator/commits/master)
Bussator is a WSGI application which implements a
[webmention](https://www.w3.org/TR/webmention/) receiver. Webmentions can then
be published through dedicated plugins; currently, a plugin for publishing
webmentions as [Isso](https://posativ.org/isso/) comments exists.
I've written Bussator to handle webmentions in [my static
blog](http://www.mardy.it), written with the [Nikola](https://getnikola.com)
site generator and having the comments handled by Isso. It's all served by a
cheap shared hosting solution, so you shouldn't need any special hosting in
order to run a similar setup.
## Installation
Bussator can be installed via [PIP](https://pypi.org/project/Bussator/):
pip install bussator
will install the module with all its dependencies. If you plan to stay up on
the bleeding edge, you can also install the latest master branch:
pip install -e git+https://gitlab.com/mardy/bussator.git#egg=bussator
When you wish to update it, just run `git pull` from within the directory where
the code was checked out (if using virtualenv, this should be
`<virtualenv>/src/bussator`).
## Deployment
Once the Bussator module has been installed and can be found by your python
interpreter, you need to instruct your webserver to use it. Given that the
module provides a `make_app()` method which creates the WSGI application, its
deployment with `mod_wsgi` or `mod_fastcgi` should be relatively easy. If you
have been able to run Bussator on other types of deployment, you are warmly
invited to share your success story with us by [opening an
issue](https://gitlab.com/mardy/bussator/issues/new).
- `mod_wsgi`: I haven't tested this, but the [instructions from the isso
  project](https://github.com/posativ/isso/blob/master/docs/docs/extras/deployment.rst#mod-wsgi)
  should work for Bussator too, with the obvious adaptations (a minimal, untested
  `bussator.wsgi` sketch is shown after this list).
- `mod_fastcgi`: install flup (`pip install flup-py3`), then create a
`bussator.fcgi` file in your server's `cgi-bin/` directory (don't forget to
make it executable!):
```
#!/usr/bin/env python
#: uncomment if you're using a virtualenv
#import sys
#sys.path.insert(0, '<your-virtualenv>/lib/python3.6/site-packages')
import os
from bussator import make_app
from flup.server.fcgi import WSGIServer
application = make_app('<path-to-your-config>/config.ini')
WSGIServer(application).run()
```
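For `mod_wsgi`, a minimal (and untested) `bussator.wsgi` script mirroring the FastCGI
example above might look like this; treat it as a sketch, not a verified recipe:

```
#: uncomment if you're using a virtualenv
#import sys
#sys.path.insert(0, '<your-virtualenv>/lib/python3.6/site-packages')
from bussator import make_app

application = make_app('<path-to-your-config>/config.ini')
```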
## Configuration
Bussator won't work unless a configuration file has been created. The
configuration file is a `.ini` style document; it's recommended that you copy
the [template](config.ini) from this repository and modify it as needed. The
comments in the file should be explanative enough.
## Integration with Twitter, Flickr and other online services
The excellent [Brid.gy](https://brid.gy) service can be used to forward
comments and likes from your social networks into Bussator (and hence into your
blog). Just tell it the exact URL of your Bussator endpoint and it should all
work.
| PypiClean |
/MuPhyN-0.1.1.post4-py3-none-any.whl/muphyn/packages/unit_test/EDT_SFT_0048_1.py | from os import system
import matplotlib.pyplot as plt
from typing import List
from box_library.transfert_function_box import get_coeff_vector, euler, trapeze
_ = system('cls')
# =========== Differential methods ===========
def differentiate(order: int, last_index: int, vector: List[float], last_derivatives: List[float], dt: float) -> List[float]:
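    """Return the first `order` numerical derivatives of `vector` evaluated at `last_index`.

    Added description (the `euler` helper imported from box_library is assumed to be a
    backward finite-difference quotient): the first derivative is euler() of the last two
    samples; each higher-order derivative is euler() of the current lower-order value
    against the value stored for the previous sample in `last_derivatives`. The result
    is zero-padded so it always contains `order` entries.
    """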
returning_tuple = []
if last_index > 0:
d_1 = vector[last_index]
d_2 = vector[last_index - 1]
returning_tuple.append(euler(d_1, d_2, dt))
for current_order in range(max(min(order, last_index - 1), 0)):
d_1 = returning_tuple[current_order]
d_0 = last_derivatives[last_index - 1][current_order]
current_derivative = euler(d_1, d_0, dt)
returning_tuple.append(current_derivative)
if current_derivative == 0:
break
while len(returning_tuple) < order:
returning_tuple.append(0)
return returning_tuple
# =========== Differential equation ===========
num = '1 1 0'
num_vect = get_coeff_vector(num)
num_vect.reverse()
num_order = len(num_vect) - 1
# =========== Simulation ===========
fs = 1000
dt = 1/fs
sim_time = 20
samples_number = int(fs * sim_time) + 1
print('=============================================================')
print('|| ||')
print('|| Simulation ||')
print('|| ||')
print('|| X = Y * (P² + P) ||')
print('|| ||')
print('|| - num order :', num_order, ' ||')
print('|| ||')
print('|| - simulation time :', sim_time, ' ||')
print('|| - sample frequency :', fs, ' ||')
print('|| - dt :', dt, ' ||')
print('|| - samples number :', samples_number, ' ||')
print('|| ||')
print('=============================================================')
print('')
print('')
# time
t = [n * dt for n in range(samples_number)]
# input
u = [(12.5*t_)**2 for t_ in t]
# output
u_derivatives : List[List[float]] = []
y = []
for i, t_ in enumerate(t) :
#for i in range(10) :
current_y = 0
current_u_derivative = differentiate(num_order, i, u, u_derivatives, dt)
for j, current_coeff in enumerate(num_vect):
if j == 0:
current_y += (current_coeff * u[i])
else:
current_y += (current_coeff * current_u_derivative[j - 1])
u_derivatives.append(current_u_derivative)
y.append(current_y)
print('at', (len(y) - 1) / fs, 's :', y[len(y) - 1])
# =========== Personal notes ===========
print('')
print(' =====> Results <=====')
print('')
print('date : 16/02/2022')
print('')
print('It is quite clear that my method works for differentiating the u values.')
print('However, my approach for the y values seems very poor!')
print('')
print('')
print('date : 17/02/2022')
print('')
print('Tried integrating the U values instead of differentiating the Y values ... VERY BAD RESULTS!!!')
print('')
print('')
print('date : 18/02/2022')
print('')
print('Reprise depuis le début du travail, voir EDT_SFT_0048_3 pour voir la suite.')
# =========== Plot results ===========
t_renderable = t[0 : len(y)]
u_renderable = u[0 : len(y)]
plt.figure()
plt.plot(t_renderable, u_renderable, label="u(t)")
plt.plot(t_renderable, y, label="y(t)")
plt.legend()
plt.grid()
plt.xlabel('Time')
plt.show() | PypiClean |
/Django-4.2.4.tar.gz/Django-4.2.4/django/db/backends/sqlite3/_functions.py | import functools
import random
import statistics
from datetime import timedelta
from hashlib import sha1, sha224, sha256, sha384, sha512
from math import (
acos,
asin,
atan,
atan2,
ceil,
cos,
degrees,
exp,
floor,
fmod,
log,
pi,
radians,
sin,
sqrt,
tan,
)
from re import search as re_search
from django.db.backends.base.base import timezone_constructor
from django.db.backends.utils import (
split_tzname_delta,
typecast_time,
typecast_timestamp,
)
from django.utils import timezone
from django.utils.crypto import md5
from django.utils.duration import duration_microseconds
def register(connection):
create_deterministic_function = functools.partial(
connection.create_function,
deterministic=True,
)
create_deterministic_function("django_date_extract", 2, _sqlite_datetime_extract)
create_deterministic_function("django_date_trunc", 4, _sqlite_date_trunc)
create_deterministic_function(
"django_datetime_cast_date", 3, _sqlite_datetime_cast_date
)
create_deterministic_function(
"django_datetime_cast_time", 3, _sqlite_datetime_cast_time
)
create_deterministic_function(
"django_datetime_extract", 4, _sqlite_datetime_extract
)
create_deterministic_function("django_datetime_trunc", 4, _sqlite_datetime_trunc)
create_deterministic_function("django_time_extract", 2, _sqlite_time_extract)
create_deterministic_function("django_time_trunc", 4, _sqlite_time_trunc)
create_deterministic_function("django_time_diff", 2, _sqlite_time_diff)
create_deterministic_function("django_timestamp_diff", 2, _sqlite_timestamp_diff)
create_deterministic_function("django_format_dtdelta", 3, _sqlite_format_dtdelta)
create_deterministic_function("regexp", 2, _sqlite_regexp)
create_deterministic_function("BITXOR", 2, _sqlite_bitxor)
create_deterministic_function("COT", 1, _sqlite_cot)
create_deterministic_function("LPAD", 3, _sqlite_lpad)
create_deterministic_function("MD5", 1, _sqlite_md5)
create_deterministic_function("REPEAT", 2, _sqlite_repeat)
create_deterministic_function("REVERSE", 1, _sqlite_reverse)
create_deterministic_function("RPAD", 3, _sqlite_rpad)
create_deterministic_function("SHA1", 1, _sqlite_sha1)
create_deterministic_function("SHA224", 1, _sqlite_sha224)
create_deterministic_function("SHA256", 1, _sqlite_sha256)
create_deterministic_function("SHA384", 1, _sqlite_sha384)
create_deterministic_function("SHA512", 1, _sqlite_sha512)
create_deterministic_function("SIGN", 1, _sqlite_sign)
# Don't use the built-in RANDOM() function because it returns a value
# in the range [-1 * 2^63, 2^63 - 1] instead of [0, 1).
connection.create_function("RAND", 0, random.random)
connection.create_aggregate("STDDEV_POP", 1, StdDevPop)
connection.create_aggregate("STDDEV_SAMP", 1, StdDevSamp)
connection.create_aggregate("VAR_POP", 1, VarPop)
connection.create_aggregate("VAR_SAMP", 1, VarSamp)
# Some math functions are enabled by default in SQLite 3.35+.
sql = "select sqlite_compileoption_used('ENABLE_MATH_FUNCTIONS')"
if not connection.execute(sql).fetchone()[0]:
create_deterministic_function("ACOS", 1, _sqlite_acos)
create_deterministic_function("ASIN", 1, _sqlite_asin)
create_deterministic_function("ATAN", 1, _sqlite_atan)
create_deterministic_function("ATAN2", 2, _sqlite_atan2)
create_deterministic_function("CEILING", 1, _sqlite_ceiling)
create_deterministic_function("COS", 1, _sqlite_cos)
create_deterministic_function("DEGREES", 1, _sqlite_degrees)
create_deterministic_function("EXP", 1, _sqlite_exp)
create_deterministic_function("FLOOR", 1, _sqlite_floor)
create_deterministic_function("LN", 1, _sqlite_ln)
create_deterministic_function("LOG", 2, _sqlite_log)
create_deterministic_function("MOD", 2, _sqlite_mod)
create_deterministic_function("PI", 0, _sqlite_pi)
create_deterministic_function("POWER", 2, _sqlite_power)
create_deterministic_function("RADIANS", 1, _sqlite_radians)
create_deterministic_function("SIN", 1, _sqlite_sin)
create_deterministic_function("SQRT", 1, _sqlite_sqrt)
create_deterministic_function("TAN", 1, _sqlite_tan)
def _sqlite_datetime_parse(dt, tzname=None, conn_tzname=None):
if dt is None:
return None
try:
dt = typecast_timestamp(dt)
except (TypeError, ValueError):
return None
if conn_tzname:
dt = dt.replace(tzinfo=timezone_constructor(conn_tzname))
if tzname is not None and tzname != conn_tzname:
tzname, sign, offset = split_tzname_delta(tzname)
if offset:
hours, minutes = offset.split(":")
offset_delta = timedelta(hours=int(hours), minutes=int(minutes))
dt += offset_delta if sign == "+" else -offset_delta
dt = timezone.localtime(dt, timezone_constructor(tzname))
return dt
def _sqlite_date_trunc(lookup_type, dt, tzname, conn_tzname):
dt = _sqlite_datetime_parse(dt, tzname, conn_tzname)
if dt is None:
return None
if lookup_type == "year":
return f"{dt.year:04d}-01-01"
elif lookup_type == "quarter":
month_in_quarter = dt.month - (dt.month - 1) % 3
return f"{dt.year:04d}-{month_in_quarter:02d}-01"
elif lookup_type == "month":
return f"{dt.year:04d}-{dt.month:02d}-01"
elif lookup_type == "week":
dt -= timedelta(days=dt.weekday())
return f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d}"
elif lookup_type == "day":
return f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d}"
raise ValueError(f"Unsupported lookup type: {lookup_type!r}")
def _sqlite_time_trunc(lookup_type, dt, tzname, conn_tzname):
if dt is None:
return None
dt_parsed = _sqlite_datetime_parse(dt, tzname, conn_tzname)
if dt_parsed is None:
try:
dt = typecast_time(dt)
except (ValueError, TypeError):
return None
else:
dt = dt_parsed
if lookup_type == "hour":
return f"{dt.hour:02d}:00:00"
elif lookup_type == "minute":
return f"{dt.hour:02d}:{dt.minute:02d}:00"
elif lookup_type == "second":
return f"{dt.hour:02d}:{dt.minute:02d}:{dt.second:02d}"
raise ValueError(f"Unsupported lookup type: {lookup_type!r}")
def _sqlite_datetime_cast_date(dt, tzname, conn_tzname):
dt = _sqlite_datetime_parse(dt, tzname, conn_tzname)
if dt is None:
return None
return dt.date().isoformat()
def _sqlite_datetime_cast_time(dt, tzname, conn_tzname):
dt = _sqlite_datetime_parse(dt, tzname, conn_tzname)
if dt is None:
return None
return dt.time().isoformat()
def _sqlite_datetime_extract(lookup_type, dt, tzname=None, conn_tzname=None):
dt = _sqlite_datetime_parse(dt, tzname, conn_tzname)
if dt is None:
return None
if lookup_type == "week_day":
return (dt.isoweekday() % 7) + 1
elif lookup_type == "iso_week_day":
return dt.isoweekday()
elif lookup_type == "week":
return dt.isocalendar()[1]
elif lookup_type == "quarter":
return ceil(dt.month / 3)
elif lookup_type == "iso_year":
return dt.isocalendar()[0]
else:
return getattr(dt, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname, conn_tzname):
dt = _sqlite_datetime_parse(dt, tzname, conn_tzname)
if dt is None:
return None
if lookup_type == "year":
return f"{dt.year:04d}-01-01 00:00:00"
elif lookup_type == "quarter":
month_in_quarter = dt.month - (dt.month - 1) % 3
return f"{dt.year:04d}-{month_in_quarter:02d}-01 00:00:00"
elif lookup_type == "month":
return f"{dt.year:04d}-{dt.month:02d}-01 00:00:00"
elif lookup_type == "week":
dt -= timedelta(days=dt.weekday())
return f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d} 00:00:00"
elif lookup_type == "day":
return f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d} 00:00:00"
elif lookup_type == "hour":
return f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d} {dt.hour:02d}:00:00"
elif lookup_type == "minute":
return (
f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d} "
f"{dt.hour:02d}:{dt.minute:02d}:00"
)
elif lookup_type == "second":
return (
f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d} "
f"{dt.hour:02d}:{dt.minute:02d}:{dt.second:02d}"
)
raise ValueError(f"Unsupported lookup type: {lookup_type!r}")
def _sqlite_time_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = typecast_time(dt)
except (ValueError, TypeError):
return None
return getattr(dt, lookup_type)
def _sqlite_prepare_dtdelta_param(conn, param):
if conn in ["+", "-"]:
if isinstance(param, int):
return timedelta(0, 0, param)
else:
return typecast_timestamp(param)
return param
def _sqlite_format_dtdelta(connector, lhs, rhs):
"""
LHS and RHS can be either:
- An integer number of microseconds
- A string representing a datetime
- A scalar value, e.g. float
"""
if connector is None or lhs is None or rhs is None:
return None
connector = connector.strip()
try:
real_lhs = _sqlite_prepare_dtdelta_param(connector, lhs)
real_rhs = _sqlite_prepare_dtdelta_param(connector, rhs)
except (ValueError, TypeError):
return None
if connector == "+":
# typecast_timestamp() returns a date or a datetime without timezone.
# It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
out = str(real_lhs + real_rhs)
elif connector == "-":
out = str(real_lhs - real_rhs)
elif connector == "*":
out = real_lhs * real_rhs
else:
out = real_lhs / real_rhs
return out
def _sqlite_time_diff(lhs, rhs):
if lhs is None or rhs is None:
return None
left = typecast_time(lhs)
right = typecast_time(rhs)
return (
(left.hour * 60 * 60 * 1000000)
+ (left.minute * 60 * 1000000)
+ (left.second * 1000000)
+ (left.microsecond)
- (right.hour * 60 * 60 * 1000000)
- (right.minute * 60 * 1000000)
- (right.second * 1000000)
- (right.microsecond)
)
def _sqlite_timestamp_diff(lhs, rhs):
if lhs is None or rhs is None:
return None
left = typecast_timestamp(lhs)
right = typecast_timestamp(rhs)
return duration_microseconds(left - right)
def _sqlite_regexp(pattern, string):
if pattern is None or string is None:
return None
if not isinstance(string, str):
string = str(string)
return bool(re_search(pattern, string))
def _sqlite_acos(x):
if x is None:
return None
return acos(x)
def _sqlite_asin(x):
if x is None:
return None
return asin(x)
def _sqlite_atan(x):
if x is None:
return None
return atan(x)
def _sqlite_atan2(y, x):
if y is None or x is None:
return None
return atan2(y, x)
def _sqlite_bitxor(x, y):
if x is None or y is None:
return None
return x ^ y
def _sqlite_ceiling(x):
if x is None:
return None
return ceil(x)
def _sqlite_cos(x):
if x is None:
return None
return cos(x)
def _sqlite_cot(x):
if x is None:
return None
return 1 / tan(x)
def _sqlite_degrees(x):
if x is None:
return None
return degrees(x)
def _sqlite_exp(x):
if x is None:
return None
return exp(x)
def _sqlite_floor(x):
if x is None:
return None
return floor(x)
def _sqlite_ln(x):
if x is None:
return None
return log(x)
def _sqlite_log(base, x):
if base is None or x is None:
return None
# Arguments reversed to match SQL standard.
return log(x, base)
def _sqlite_lpad(text, length, fill_text):
if text is None or length is None or fill_text is None:
return None
delta = length - len(text)
if delta <= 0:
return text[:length]
return (fill_text * length)[:delta] + text
def _sqlite_md5(text):
if text is None:
return None
return md5(text.encode()).hexdigest()
def _sqlite_mod(x, y):
if x is None or y is None:
return None
return fmod(x, y)
def _sqlite_pi():
return pi
def _sqlite_power(x, y):
if x is None or y is None:
return None
return x**y
def _sqlite_radians(x):
if x is None:
return None
return radians(x)
def _sqlite_repeat(text, count):
if text is None or count is None:
return None
return text * count
def _sqlite_reverse(text):
if text is None:
return None
return text[::-1]
def _sqlite_rpad(text, length, fill_text):
if text is None or length is None or fill_text is None:
return None
return (text + fill_text * length)[:length]
def _sqlite_sha1(text):
if text is None:
return None
return sha1(text.encode()).hexdigest()
def _sqlite_sha224(text):
if text is None:
return None
return sha224(text.encode()).hexdigest()
def _sqlite_sha256(text):
if text is None:
return None
return sha256(text.encode()).hexdigest()
def _sqlite_sha384(text):
if text is None:
return None
return sha384(text.encode()).hexdigest()
def _sqlite_sha512(text):
if text is None:
return None
return sha512(text.encode()).hexdigest()
def _sqlite_sign(x):
if x is None:
return None
return (x > 0) - (x < 0)
def _sqlite_sin(x):
if x is None:
return None
return sin(x)
def _sqlite_sqrt(x):
if x is None:
return None
return sqrt(x)
def _sqlite_tan(x):
if x is None:
return None
return tan(x)
class ListAggregate(list):
step = list.append
class StdDevPop(ListAggregate):
finalize = statistics.pstdev
class StdDevSamp(ListAggregate):
finalize = statistics.stdev
class VarPop(ListAggregate):
finalize = statistics.pvariance
class VarSamp(ListAggregate):
finalize = statistics.variance | PypiClean |
/BRAILS-3.0.1.tar.gz/BRAILS-3.0.1/brails/legacy/workflow/Images.py | import os
import random
from multiprocessing.dummy import Pool as ThreadPool
import requests
from pathlib import Path
from functools import lru_cache
@lru_cache(maxsize=None)
def validateGoogleMapsAPI(key: str)->bool:
"""Validate a Google Maps API key.
The `@lru_cache` decorator automatically creates a
cache for API values so that a validation process
will only be run the first time the function is
called.
`bool(key)` will be false for both the empty
string, `''`, and `None` values. This function
should be expanded.
"""
return bool(key) and key != 'put-your-key-here'
def capturePic(browser, picname):
try:
localurl = browser.save_screenshot(picname)
print("%s : Success" % localurl)
except BaseException as msg:
print("Fail:%s" % msg)
def download(urls):
xcount = 0
nlimit = 1e10
reDownload = urls[0][5]
for ls in urls:
urlTop = ls[0]
urlStreet = ls[1]
#lon = ls[2]
#lat = ls[3]
addr = ls[2]
cats = ls[3]
imgDir = ls[4]
#reDownload = ls[5]
'''
if rooftype not in roofcat:
print('not in roofcat')
continue
'''
#if not os.path.exists(thisFileDir):
# os.makedirs(thisFileDir)
#numoffiles = len(os.listdir(thisFileDir))
if xcount < nlimit: #numoffiles < maxNumofRoofImgs:
for cat in cats:
if cat == 'StreetView': trueURL = urlStreet
elif cat == 'TopView': trueURL = urlTop
if type(addr) == str:
addrstr = addr.replace(' ','-')
picname = Path(f'{imgDir}/{cat}/{cat}x{addrstr}.png')
else:
lon, lat = '%.6f'%addr[0], '%.6f'%addr[1]
#picname = thisFileDir + '/{prefix}x{lon}x{lat}.png'.format(prefix='StreetView',lon=lon,lat=lat)
picname = Path(f'{imgDir}/{cat}/{cat}x{lon}x{lat}.png')
exist = os.path.exists(picname)
if not exist or (exist and reDownload):
r = requests.get(trueURL)
f = open(picname, 'wb')
f.write(r.content)
f.close()
xcount += 1
if os.path.getsize(picname)/1024 < 9:
#print(urlStreet)
#print(f"empty image from API: ", addr)
pass
#exit() # empty image from API
else:
break
# construct urls
def getGoogleImages(footprints=None, GoogleMapAPIKey='',imageTypes=['StreetView','TopView'],imgDir='',ncpu=2,fov=60,pitch=0,reDownloadImgs=False):
if footprints is None:
raise ValueError('Please provide footprints')
if not validateGoogleMapsAPI(GoogleMapAPIKey):
raise ValueError('Invalid GoogleMapAPIKey.')
for imgType in imageTypes:
tmpImgeDir = os.path.join(imgDir, imgType)
if not os.path.exists(tmpImgeDir): os.makedirs(tmpImgeDir)
# APIs
baseurl_streetview = "https://maps.googleapis.com/maps/api/streetview?size=640x640&location={lat},{lon}&fov={fov}&pitch={pitch}&source=outdoor&key="+GoogleMapAPIKey
# consider using 256x256 to save disk
baseurl_satellite="https://maps.googleapis.com/maps/api/staticmap?center={lat},{lon}&zoom=20&scale=1&size=256x256&maptype=satellite&key="+GoogleMapAPIKey+"&format=png&visual_refresh=true"
#footprints = gpd.read_file(BuildingFootPrintsFileName)
urls = []
for ind, row in footprints.iterrows():
o = row['geometry'].centroid
lon, lat = '%.6f'%o.x, '%.6f'%o.y
# a top view
urlTop = baseurl_satellite.format(lat=lat,lon=lon)
urlStreet = baseurl_streetview.format(lat=lat,lon=lon,fov=fov,pitch=pitch)
cats = imageTypes
reDownload = 1 if reDownloadImgs else 0
urls.append([urlTop,urlStreet,[o.x, o.y],cats,imgDir,reDownload])
#print('shuffling...')
#random.shuffle(urls)
#print('shuffled...')
# divide urls into small chunks
#ncpu = 4
step = int(len(urls)/ncpu)+1
chunks = [urls[x:x+step] for x in range(0, len(urls), step)]
print('Downloading images from Google API ...')
# get some workers
pool = ThreadPool(ncpu)
# send job to workers
results = pool.map(download, chunks)
# jobs are done, clean the site
pool.close()
pool.join()
print('Images downloaded ...')
def getGoogleImagesByAddrOrCoord(Addrs=None, GoogleMapAPIKey='',imageTypes=['StreetView','TopView'],imgDir='',ncpu=2,fov=60,pitch=0,reDownloadImgs=False):
if Addrs is None:
raise ValueError('Please provide Addrs')
if not validateGoogleMapsAPI(GoogleMapAPIKey):
raise ValueError('Invalid GoogleMapAPIKey.')
for imgType in imageTypes:
tmpImgeDir = os.path.join(imgDir, imgType)
if not os.path.exists(tmpImgeDir): os.makedirs(tmpImgeDir)
# APIs
baseurl_streetview_addr = "https://maps.googleapis.com/maps/api/streetview?size=640x640&location={addr}&fov={fov}&pitch={pitch}&source=outdoor&key="+GoogleMapAPIKey
baseurl_streetview_coord = "https://maps.googleapis.com/maps/api/streetview?size=640x640&location={lat},{lon}&fov={fov}&pitch={pitch}&source=outdoor&key="+GoogleMapAPIKey
# consider using 256x256 to save disk
baseurl_satellite_addr="https://maps.googleapis.com/maps/api/staticmap?center={addr}&zoom=20&scale=1&size=256x256&maptype=satellite&key="+GoogleMapAPIKey+"&format=png&visual_refresh=true"
baseurl_satellite_coord="https://maps.googleapis.com/maps/api/staticmap?center={lat},{lon}&zoom=20&scale=1&size=256x256&maptype=satellite&key="+GoogleMapAPIKey+"&format=png&visual_refresh=true"
#footprints = gpd.read_file(BuildingFootPrintsFileName)
urls = []
for addr in Addrs:
        if isinstance(addr, str):
urlTop = baseurl_satellite_addr.format(addr=addr)
urlStreet = baseurl_streetview_addr.format(addr=addr,fov=fov,pitch=pitch)
else:
lon, lat = '%.6f'%addr[0], '%.6f'%addr[1]
urlTop = baseurl_satellite_coord.format(lat=lat,lon=lon)
urlStreet = baseurl_streetview_coord.format(lat=lat,lon=lon,fov=fov,pitch=pitch)
cats = imageTypes
reDownload = 1 if reDownloadImgs else 0
urls.append([urlTop,urlStreet,addr,cats,imgDir,reDownload])
#print('shuffling...')
#random.shuffle(urls)
#print('shuffled...')
# divide urls into small chunks
#ncpu = 4
step = int(len(urls)/ncpu)+1
chunks = [urls[x:x+step] for x in range(0, len(urls), step)]
print('Downloading images from Google API ...')
# get some workers
pool = ThreadPool(ncpu)
# send job to workers
results = pool.map(download, chunks)
# jobs are done, clean the site
pool.close()
pool.join()
    print('Images downloaded ...')
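
# --- Usage sketch (illustrative only; not part of the original module) ---
# Assuming the module is imported and a valid Google Maps API key is available,
# the address/coordinate entry point above could be driven roughly like this
# (the key, addresses and output directory are placeholders):
#
#   getGoogleImagesByAddrOrCoord(
#       Addrs=['1600 Amphitheatre Parkway, Mountain View, CA', (-122.084, 37.422)],
#       GoogleMapAPIKey='YOUR_API_KEY',
#       imageTypes=['StreetView', 'TopView'],
#       imgDir='images', ncpu=2, fov=60, pitch=0)
#
# Each string address or (lon, lat) pair yields one 640x640 street-view PNG and
# one 256x256 satellite PNG under imgDir/StreetView/ and imgDir/TopView/.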
/BasicLibrary.PY-0.5.12.tar.gz/BasicLibrary.PY-0.5.12/BasicLibrary/dataBase/databaseClient.py
from BasicLibrary.configHelper import ConfigHelper as ch
from BasicLibrary.data.dictHelper import DictHelper
from BasicLibrary.dataBase.databaseDDL import DatabaseDDL
from BasicLibrary.dataBase.databaseMate import DatabaseMate
from BasicLibrary.model.container import Container
class DatabaseClient:
"""
向外暴露的主要类型接口
"""
@classmethod
def __get_db_type_name(cls):
type_name = ch.get_item("db_type", "type_name", "MySql")
return type_name
@classmethod
def get_mate(cls, table_name):
"""
这是向外暴露的主要方法接口
获取跟数据库(表信息)交互的对象
:param table_name:
:return:
"""
mate_dict = Container.get_dict("mate_dict")
if DictHelper.is_contains_key(mate_dict, table_name):
return mate_dict[table_name]
else:
type_name = cls.__get_db_type_name()
package_name = "hilandBasicLibrary.dataBase.{0}.mate".format(type_name)
module = __import__(package_name, fromlist=["Mate"])
mate = module.Mate(table_name)
mate_dict[table_name] = mate
if isinstance(mate, DatabaseMate):
return mate
else:
return None
@classmethod
def get_ddl(cls):
"""
这是向外暴露的操作数据库结构的方法接口
:return:
"""
ddl_key = "__database_ddl__"
ddl = Container.get_item(ddl_key)
if ddl is None:
ddl = cls.__get_ddl_detail()
Container.set_item(ddl_key, ddl)
if isinstance(ddl, DatabaseDDL):
return ddl
else:
return None
@classmethod
def __get_ddl_detail(cls):
type_name = cls.__get_db_type_name()
package_name = "hilandBasicLibrary.dataBase.{0}.ddl".format(type_name)
module = __import__(package_name, fromlist=["DDL"])
ddl = module.DDL()
return ddl
# if __name__ == '__main__':
# print(__MateContainer.mate_dict)
# __MateContainer.mate_dict['a'] = 'AA'
# __MateContainer.mate_dict['b'] = 'BB'
# print(__MateContainer.mate_dict)
#
# result = DictHelper.contain_key(__MateContainer.mate_dict, 'c')
# print(result)
# result = DictHelper.contain_key(__MateContainer.mate_dict, 'b')
# print(result)
# # print(AAA)
# # print(AAA())
# _mate = get_mate("dp_demo")
# _mate.hello("Mr.Xie")
# _result = _mate.find_one({"class": "一", "age": {"$gt": 20}})
# print(_result)
#
# _result = _mate.find_many({"class": "一"})
# print(_result)
# _entity_data = {"name": "宋8", "age": 28, "class": "三"}
# _result = _mate.insert_one(_entity_data)
# print(_result)
# _entity_list = [{"name": "宋10", "age": 28, "class": "三"}, {"name": "宋11", "age": 28, "class": "三"}, {"name": "宋12", "age": 28, "class": "三"}]
# _result = _mate.insert_many(_entity_list)
# print(_result)
# _condition = {"name": "宋8"}
# _result = _mate.delete_many(_condition)
# print(_result)
# _condition = {"name": "宋8"}
# _result = _mate.delete_one(_condition)
# print(_result)
# _condition = {"name": "宋10"}
# _fixing = {"age": 32}
# _result = _mate.update_many(_fixing, _condition)
# print(_result)
# _condition = {"name": "宋10"}
# _fixing = {"age": 32}
# _result = _mate.update_one(_fixing, _condition)
# print(_result)
# _condition = {"name": "宋10"}
# _condition = {}
# _result = _mate.query_count(_condition)
# print(_result)
# _condition = {"name": "宋30"}
# _entity = {"name": "宋30", "age": 28, "class": "三"}
# _result = _mate.insert_one_non_duplication(_entity, _condition)
# print(_result)
# _condition = {"name": "宋31"}
# _entity = [{"name": "宋31", "age": 28, "class": "三"}, {"name": "宋32", "age": 29, "class": "三"}]
# _result = _mate.insert_many_non_duplication(_entity, _condition)
# print(_result)
# _result = _mate.get_max("age", {"name": "宋12"})
# print(_result)
#
# _result = _mate.get_min("age")
# print(_result)
#
# _result = _mate.get_min("age", {"name": "宋10"})
# print(_result)
# _result = _mate.find_like("name", "宋1", "before")
# print(_result)
#
# _result = _mate.find_like("name", "五", "after")
# print(_result)
#
# _result = _mate.find_like("name", "五")
# print(_result)
# _result = _mate.find_more("age", 25)
# print(_result)
# _result = _mate.find_less("age", 25)
# print(_result)
# _result = _mate.find_between("age", 25, 29)
# print(_result)
# type_name = __get_db_type_name() + "Mate"
# module = __import__(type_name)
# _mate = module.Mate
# _mate.hello("China") | PypiClean |
/LFT-0.1.1-py3-none-any.whl/lft/event/mediators/delayed_event_mediator.py
import asyncio
from typing import Set, Optional
from lft.event import (Event, EventSimulator, EventMediator,
EventInstantMediatorExecutor, EventReplayerMediatorExecutor, EventRecorderMediatorExecutor)
__all__ = ("DelayedHandlerMixin", "DelayedEventMediator", "DelayedHandler", "DelayedEventInstantMediatorExecutor",
"DelayedEventRecorderMediatorExecutor", "DelayedEventReplayerMediatorExecutor")
class DelayedHandlerMixin:
def __init__(self):
self.handlers: Set['DelayedHandler'] = set()
def _handle(self,
loop: asyncio.AbstractEventLoop,
delay: float,
event: Event,
event_simulator: EventSimulator):
delayed_handler = DelayedHandler()
self.handlers.add(delayed_handler)
loop = loop or asyncio.get_event_loop()
timer_handler = loop.call_later(delay, delayed_handler)
delayed_handler.event = event
delayed_handler.event_simulator = event_simulator
delayed_handler.timer_handler = timer_handler
delayed_handler.handlers = self.handlers
class DelayedEventInstantMediatorExecutor(EventInstantMediatorExecutor, DelayedHandlerMixin):
def execute(self, delay: float, event: Event, loop: asyncio.AbstractEventLoop=None):
_is_valid_event(event)
self._handle(loop, delay, event, self._event_simulator)
async def execute_async(self, delay: float, event: Event, loop: asyncio.AbstractEventLoop=None):
return self.execute(delay, event, loop)
class DelayedEventRecorderMediatorExecutor(EventRecorderMediatorExecutor, DelayedHandlerMixin):
def execute(self, delay: float, event: Event, loop: asyncio.AbstractEventLoop=None):
_is_valid_event(event)
self._handle(loop, delay, event, self._event_recorder.event_simulator)
async def execute_async(self, delay: float, event: Event, loop: asyncio.AbstractEventLoop=None):
return self.execute(delay, event, loop)
class DelayedEventReplayerMediatorExecutor(EventReplayerMediatorExecutor):
def execute(self, delay: float, event: Event, loop: asyncio.AbstractEventLoop=None):
# do nothing
_is_valid_event(event)
async def execute_async(self, delay: float, event: Event, loop: asyncio.AbstractEventLoop=None):
return self.execute(delay, event, loop)
class DelayedEventMediator(EventMediator):
InstantExecutorType = DelayedEventInstantMediatorExecutor
RecorderExecutorType = DelayedEventRecorderMediatorExecutor
ReplayerExecutorType = DelayedEventReplayerMediatorExecutor
def execute(self, delay: float, event: Event, loop: asyncio.AbstractEventLoop=None):
return super().execute(delay=delay, event=event, loop=loop)
def _is_valid_event(event: Event):
if event.deterministic:
raise RuntimeError(f"Delayed event must not be deterministic :{event.serialize()}")
class DelayedHandler:
def __init__(self):
self.event: Optional[Event] = None
self.event_simulator: Optional[EventSimulator] = None
self.timer_handler: Optional[asyncio.TimerHandle] = None
self.handlers: Optional[Set['DelayedHandler']] = None
def __call__(self):
self.handlers.remove(self)
        self.event_simulator.raise_event(self.event)
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic.py
_base_ = [
'../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'
]
num_things_classes = 80
num_stuff_classes = 53
num_classes = num_things_classes + num_stuff_classes
model = dict(
type='Mask2Former',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=-1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
panoptic_head=dict(
type='Mask2FormerHead',
in_channels=[256, 512, 1024, 2048], # pass to pixel_decoder inside
strides=[4, 8, 16, 32],
feat_channels=256,
out_channels=256,
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
num_queries=100,
num_transformer_feat_level=3,
pixel_decoder=dict(
type='MSDeformAttnPixelDecoder',
num_outs=3,
norm_cfg=dict(type='GN', num_groups=32),
act_cfg=dict(type='ReLU'),
encoder=dict(
type='DetrTransformerEncoder',
num_layers=6,
transformerlayers=dict(
type='BaseTransformerLayer',
attn_cfgs=dict(
type='MultiScaleDeformableAttention',
embed_dims=256,
num_heads=8,
num_levels=3,
num_points=4,
im2col_step=64,
dropout=0.0,
batch_first=False,
norm_cfg=None,
init_cfg=None),
ffn_cfgs=dict(
type='FFN',
embed_dims=256,
feedforward_channels=1024,
num_fcs=2,
ffn_drop=0.0,
act_cfg=dict(type='ReLU', inplace=True)),
operation_order=('self_attn', 'norm', 'ffn', 'norm')),
init_cfg=None),
positional_encoding=dict(
type='SinePositionalEncoding', num_feats=128, normalize=True),
init_cfg=None),
enforce_decoder_input_project=False,
positional_encoding=dict(
type='SinePositionalEncoding', num_feats=128, normalize=True),
transformer_decoder=dict(
type='DetrTransformerDecoder',
return_intermediate=True,
num_layers=9,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
attn_drop=0.0,
proj_drop=0.0,
dropout_layer=None,
batch_first=False),
ffn_cfgs=dict(
embed_dims=256,
feedforward_channels=2048,
num_fcs=2,
act_cfg=dict(type='ReLU', inplace=True),
ffn_drop=0.0,
dropout_layer=None,
add_identity=True),
feedforward_channels=2048,
operation_order=('cross_attn', 'norm', 'self_attn', 'norm',
'ffn', 'norm')),
init_cfg=None),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=2.0,
reduction='mean',
class_weight=[1.0] * num_classes + [0.1]),
loss_mask=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
reduction='mean',
loss_weight=5.0),
loss_dice=dict(
type='DiceLoss',
use_sigmoid=True,
activate=True,
reduction='mean',
naive_dice=True,
eps=1.0,
loss_weight=5.0)),
panoptic_fusion_head=dict(
type='MaskFormerFusionHead',
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
loss_panoptic=None,
init_cfg=None),
train_cfg=dict(
num_points=12544,
oversample_ratio=3.0,
importance_sample_ratio=0.75,
assigner=dict(
type='MaskHungarianAssigner',
cls_cost=dict(type='ClassificationCost', weight=2.0),
mask_cost=dict(
type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),
dice_cost=dict(
type='DiceCost', weight=5.0, pred_act=True, eps=1.0)),
sampler=dict(type='MaskPseudoSampler')),
test_cfg=dict(
panoptic_on=True,
# For now, the dataset does not support
# evaluating semantic segmentation metric.
semantic_on=False,
instance_on=True,
# max_per_image is for instance segmentation.
max_per_image=100,
iou_thr=0.8,
# In Mask2Former's panoptic postprocessing,
# it will filter mask area where score is less than 0.5 .
filter_low_score=True),
init_cfg=None)
# dataset settings
image_size = (1024, 1024)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='LoadPanopticAnnotations',
with_bbox=True,
with_mask=True,
with_seg=True),
dict(type='RandomFlip', flip_ratio=0.5),
# large scale jittering
dict(
type='Resize',
img_scale=image_size,
ratio_range=(0.1, 2.0),
multiscale_mode='range',
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=image_size,
crop_type='absolute',
recompute_bbox=True,
allow_negative_crop=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=image_size),
dict(type='DefaultFormatBundle', img_to_float=True),
dict(
type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data_root = 'data/coco/'
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(pipeline=train_pipeline),
val=dict(
pipeline=test_pipeline,
ins_ann_file=data_root + 'annotations/instances_val2017.json',
),
test=dict(
pipeline=test_pipeline,
ins_ann_file=data_root + 'annotations/instances_val2017.json',
))
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
# optimizer
optimizer = dict(
type='AdamW',
lr=0.0001,
weight_decay=0.05,
eps=1e-8,
betas=(0.9, 0.999),
paramwise_cfg=dict(
custom_keys={
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi,
},
norm_decay_mult=0.0))
optimizer_config = dict(grad_clip=dict(max_norm=0.01, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
gamma=0.1,
by_epoch=False,
step=[327778, 355092],
warmup='linear',
warmup_by_epoch=False,
warmup_ratio=1.0, # no warmup
warmup_iters=10)
max_iters = 368750
runner = dict(type='IterBasedRunner', max_iters=max_iters)
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
dict(type='TensorboardLoggerHook', by_epoch=False)
])
interval = 5000
workflow = [('train', interval)]
checkpoint_config = dict(
by_epoch=False, interval=interval, save_last=True, max_keep_ckpts=3)
# Up to iteration 365000, evaluation runs every 5000 iterations.
# From iteration 365001 on, the interval becomes 368750 (= max_iters),
# so the final evaluation happens exactly at the end of training.
dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)]
evaluation = dict(
interval=interval,
dynamic_intervals=dynamic_intervals,
    metric=['PQ', 'bbox', 'segm'])
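
# --- Usage note (assumption; not part of the original config) ---
# With a standard mmdetection checkout, a config like this is typically launched
# through the stock training entry point, e.g.:
#   python tools/train.py configs/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic.py
# The "lsj_8x2" in the file name refers to large-scale jittering with
# 8 GPUs x 2 images per GPU, which matches samples_per_gpu=2 above.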
/Hikka_TL_New-2.0.4-py3-none-any.whl/hikkatl/tl/functions/messages.py
from ...tl.tlobject import TLObject
from ...tl.tlobject import TLRequest
from typing import Optional, List, Union, TYPE_CHECKING
import os
import struct
from datetime import datetime
if TYPE_CHECKING:
from ...tl.types import TypeChatBannedRights, TypeChatReactions, TypeDataJSON, TypeDialogFilter, TypeInlineBotSwitchPM, TypeInlineBotWebView, TypeInputBotApp, TypeInputBotInlineMessageID, TypeInputBotInlineResult, TypeInputChatPhoto, TypeInputCheckPasswordSRP, TypeInputDialogPeer, TypeInputDocument, TypeInputEncryptedChat, TypeInputEncryptedFile, TypeInputFile, TypeInputGeoPoint, TypeInputMedia, TypeInputMessage, TypeInputPeer, TypeInputSingleMedia, TypeInputStickerSet, TypeInputStickeredMedia, TypeInputUser, TypeInputWallPaper, TypeMessageEntity, TypeMessagesFilter, TypeReaction, TypeReplyMarkup, TypeReportReason, TypeSendMessageAction, TypeShippingOption, TypeTextWithEntities, TypeWallPaperSettings
class AcceptEncryptionRequest(TLRequest):
CONSTRUCTOR_ID = 0x3dbc0415
SUBCLASS_OF_ID = 0x6d28a37a
def __init__(self, peer: 'TypeInputEncryptedChat', g_b: bytes, key_fingerprint: int):
"""
:returns EncryptedChat: Instance of either EncryptedChatEmpty, EncryptedChatWaiting, EncryptedChatRequested, EncryptedChat, EncryptedChatDiscarded.
"""
self.peer = peer
self.g_b = g_b
self.key_fingerprint = key_fingerprint
def to_dict(self):
return {
'_': 'AcceptEncryptionRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'g_b': self.g_b,
'key_fingerprint': self.key_fingerprint
}
def _bytes(self):
return b''.join((
b'\x15\x04\xbc=',
self.peer._bytes(),
self.serialize_bytes(self.g_b),
struct.pack('<q', self.key_fingerprint),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_g_b = reader.tgread_bytes()
_key_fingerprint = reader.read_long()
return cls(peer=_peer, g_b=_g_b, key_fingerprint=_key_fingerprint)
class AcceptUrlAuthRequest(TLRequest):
CONSTRUCTOR_ID = 0xb12c7125
SUBCLASS_OF_ID = 0x7765cb1e
def __init__(self, write_allowed: Optional[bool]=None, peer: Optional['TypeInputPeer']=None, msg_id: Optional[int]=None, button_id: Optional[int]=None, url: Optional[str]=None):
"""
:returns UrlAuthResult: Instance of either UrlAuthResultRequest, UrlAuthResultAccepted, UrlAuthResultDefault.
"""
self.write_allowed = write_allowed
self.peer = peer
self.msg_id = msg_id
self.button_id = button_id
self.url = url
async def resolve(self, client, utils):
if self.peer:
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'AcceptUrlAuthRequest',
'write_allowed': self.write_allowed,
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'msg_id': self.msg_id,
'button_id': self.button_id,
'url': self.url
}
def _bytes(self):
        assert ((self.peer or self.peer is not None) and (self.msg_id or self.msg_id is not None) and (self.button_id or self.button_id is not None)) or ((self.peer is None or self.peer is False) and (self.msg_id is None or self.msg_id is False) and (self.button_id is None or self.button_id is False)), 'peer, msg_id, button_id parameters must all be False-y (like None) or all be True-y'
return b''.join((
b'%q,\xb1',
struct.pack('<I', (0 if self.write_allowed is None or self.write_allowed is False else 1) | (0 if self.peer is None or self.peer is False else 2) | (0 if self.msg_id is None or self.msg_id is False else 2) | (0 if self.button_id is None or self.button_id is False else 2) | (0 if self.url is None or self.url is False else 4)),
b'' if self.peer is None or self.peer is False else (self.peer._bytes()),
b'' if self.msg_id is None or self.msg_id is False else (struct.pack('<i', self.msg_id)),
b'' if self.button_id is None or self.button_id is False else (struct.pack('<i', self.button_id)),
b'' if self.url is None or self.url is False else (self.serialize_bytes(self.url)),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_write_allowed = bool(flags & 1)
if flags & 2:
_peer = reader.tgread_object()
else:
_peer = None
if flags & 2:
_msg_id = reader.read_int()
else:
_msg_id = None
if flags & 2:
_button_id = reader.read_int()
else:
_button_id = None
if flags & 4:
_url = reader.tgread_string()
else:
_url = None
return cls(write_allowed=_write_allowed, peer=_peer, msg_id=_msg_id, button_id=_button_id, url=_url)
class AddChatUserRequest(TLRequest):
CONSTRUCTOR_ID = 0xf24753e3
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, chat_id: int, user_id: 'TypeInputUser', fwd_limit: int):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.chat_id = chat_id
self.user_id = user_id
self.fwd_limit = fwd_limit
async def resolve(self, client, utils):
self.user_id = utils.get_input_user(await client.get_input_entity(self.user_id))
def to_dict(self):
return {
'_': 'AddChatUserRequest',
'chat_id': self.chat_id,
'user_id': self.user_id.to_dict() if isinstance(self.user_id, TLObject) else self.user_id,
'fwd_limit': self.fwd_limit
}
def _bytes(self):
return b''.join((
b'\xe3SG\xf2',
struct.pack('<q', self.chat_id),
self.user_id._bytes(),
struct.pack('<i', self.fwd_limit),
))
@classmethod
def from_reader(cls, reader):
_chat_id = reader.read_long()
_user_id = reader.tgread_object()
_fwd_limit = reader.read_int()
return cls(chat_id=_chat_id, user_id=_user_id, fwd_limit=_fwd_limit)
class CheckChatInviteRequest(TLRequest):
CONSTRUCTOR_ID = 0x3eadb1bb
SUBCLASS_OF_ID = 0x4561736
# noinspection PyShadowingBuiltins
def __init__(self, hash: str):
"""
:returns ChatInvite: Instance of either ChatInviteAlready, ChatInvite, ChatInvitePeek.
"""
self.hash = hash
def to_dict(self):
return {
'_': 'CheckChatInviteRequest',
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\xbb\xb1\xad>',
self.serialize_bytes(self.hash),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.tgread_string()
return cls(hash=_hash)
class CheckHistoryImportRequest(TLRequest):
CONSTRUCTOR_ID = 0x43fe19f3
SUBCLASS_OF_ID = 0x5bb2720b
def __init__(self, import_head: str):
"""
:returns messages.HistoryImportParsed: Instance of HistoryImportParsed.
"""
self.import_head = import_head
def to_dict(self):
return {
'_': 'CheckHistoryImportRequest',
'import_head': self.import_head
}
def _bytes(self):
return b''.join((
b'\xf3\x19\xfeC',
self.serialize_bytes(self.import_head),
))
@classmethod
def from_reader(cls, reader):
_import_head = reader.tgread_string()
return cls(import_head=_import_head)
class CheckHistoryImportPeerRequest(TLRequest):
CONSTRUCTOR_ID = 0x5dc60f03
SUBCLASS_OF_ID = 0xb84bb337
def __init__(self, peer: 'TypeInputPeer'):
"""
:returns messages.CheckedHistoryImportPeer: Instance of CheckedHistoryImportPeer.
"""
self.peer = peer
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'CheckHistoryImportPeerRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer
}
def _bytes(self):
return b''.join((
b'\x03\x0f\xc6]',
self.peer._bytes(),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
return cls(peer=_peer)
class ClearAllDraftsRequest(TLRequest):
CONSTRUCTOR_ID = 0x7e58ee9c
SUBCLASS_OF_ID = 0xf5b399ac
def to_dict(self):
return {
'_': 'ClearAllDraftsRequest'
}
def _bytes(self):
return b''.join((
b'\x9c\xeeX~',
))
@classmethod
def from_reader(cls, reader):
return cls()
class ClearRecentReactionsRequest(TLRequest):
CONSTRUCTOR_ID = 0x9dfeefb4
SUBCLASS_OF_ID = 0xf5b399ac
def to_dict(self):
return {
'_': 'ClearRecentReactionsRequest'
}
def _bytes(self):
return b''.join((
b'\xb4\xef\xfe\x9d',
))
@classmethod
def from_reader(cls, reader):
return cls()
class ClearRecentStickersRequest(TLRequest):
CONSTRUCTOR_ID = 0x8999602d
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, attached: Optional[bool]=None):
"""
:returns Bool: This type has no constructors.
"""
self.attached = attached
def to_dict(self):
return {
'_': 'ClearRecentStickersRequest',
'attached': self.attached
}
def _bytes(self):
return b''.join((
b'-`\x99\x89',
struct.pack('<I', (0 if self.attached is None or self.attached is False else 1)),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_attached = bool(flags & 1)
return cls(attached=_attached)
class CreateChatRequest(TLRequest):
CONSTRUCTOR_ID = 0x34a818
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, users: List['TypeInputUser'], title: str, ttl_period: Optional[int]=None):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.users = users
self.title = title
self.ttl_period = ttl_period
async def resolve(self, client, utils):
_tmp = []
for _x in self.users:
_tmp.append(utils.get_input_user(await client.get_input_entity(_x)))
self.users = _tmp
def to_dict(self):
return {
'_': 'CreateChatRequest',
'users': [] if self.users is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.users],
'title': self.title,
'ttl_period': self.ttl_period
}
def _bytes(self):
return b''.join((
b'\x18\xa84\x00',
struct.pack('<I', (0 if self.ttl_period is None or self.ttl_period is False else 1)),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.users)),b''.join(x._bytes() for x in self.users),
self.serialize_bytes(self.title),
b'' if self.ttl_period is None or self.ttl_period is False else (struct.pack('<i', self.ttl_period)),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
reader.read_int()
_users = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_users.append(_x)
_title = reader.tgread_string()
if flags & 1:
_ttl_period = reader.read_int()
else:
_ttl_period = None
return cls(users=_users, title=_title, ttl_period=_ttl_period)
class DeleteChatRequest(TLRequest):
CONSTRUCTOR_ID = 0x5bd0ee50
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, chat_id: int):
"""
:returns Bool: This type has no constructors.
"""
self.chat_id = chat_id
def to_dict(self):
return {
'_': 'DeleteChatRequest',
'chat_id': self.chat_id
}
def _bytes(self):
return b''.join((
b'P\xee\xd0[',
struct.pack('<q', self.chat_id),
))
@classmethod
def from_reader(cls, reader):
_chat_id = reader.read_long()
return cls(chat_id=_chat_id)
class DeleteChatUserRequest(TLRequest):
CONSTRUCTOR_ID = 0xa2185cab
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, chat_id: int, user_id: 'TypeInputUser', revoke_history: Optional[bool]=None):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.chat_id = chat_id
self.user_id = user_id
self.revoke_history = revoke_history
async def resolve(self, client, utils):
self.user_id = utils.get_input_user(await client.get_input_entity(self.user_id))
def to_dict(self):
return {
'_': 'DeleteChatUserRequest',
'chat_id': self.chat_id,
'user_id': self.user_id.to_dict() if isinstance(self.user_id, TLObject) else self.user_id,
'revoke_history': self.revoke_history
}
def _bytes(self):
return b''.join((
b'\xab\\\x18\xa2',
struct.pack('<I', (0 if self.revoke_history is None or self.revoke_history is False else 1)),
struct.pack('<q', self.chat_id),
self.user_id._bytes(),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_revoke_history = bool(flags & 1)
_chat_id = reader.read_long()
_user_id = reader.tgread_object()
return cls(chat_id=_chat_id, user_id=_user_id, revoke_history=_revoke_history)
class DeleteExportedChatInviteRequest(TLRequest):
CONSTRUCTOR_ID = 0xd464a42b
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, peer: 'TypeInputPeer', link: str):
"""
:returns Bool: This type has no constructors.
"""
self.peer = peer
self.link = link
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'DeleteExportedChatInviteRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'link': self.link
}
def _bytes(self):
return b''.join((
b'+\xa4d\xd4',
self.peer._bytes(),
self.serialize_bytes(self.link),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_link = reader.tgread_string()
return cls(peer=_peer, link=_link)
class DeleteHistoryRequest(TLRequest):
CONSTRUCTOR_ID = 0xb08f922a
SUBCLASS_OF_ID = 0x2c49c116
def __init__(self, peer: 'TypeInputPeer', max_id: int, just_clear: Optional[bool]=None, revoke: Optional[bool]=None, min_date: Optional[datetime]=None, max_date: Optional[datetime]=None):
"""
:returns messages.AffectedHistory: Instance of AffectedHistory.
"""
self.peer = peer
self.max_id = max_id
self.just_clear = just_clear
self.revoke = revoke
self.min_date = min_date
self.max_date = max_date
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'DeleteHistoryRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'max_id': self.max_id,
'just_clear': self.just_clear,
'revoke': self.revoke,
'min_date': self.min_date,
'max_date': self.max_date
}
def _bytes(self):
return b''.join((
b'*\x92\x8f\xb0',
struct.pack('<I', (0 if self.just_clear is None or self.just_clear is False else 1) | (0 if self.revoke is None or self.revoke is False else 2) | (0 if self.min_date is None or self.min_date is False else 4) | (0 if self.max_date is None or self.max_date is False else 8)),
self.peer._bytes(),
struct.pack('<i', self.max_id),
b'' if self.min_date is None or self.min_date is False else (self.serialize_datetime(self.min_date)),
b'' if self.max_date is None or self.max_date is False else (self.serialize_datetime(self.max_date)),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_just_clear = bool(flags & 1)
_revoke = bool(flags & 2)
_peer = reader.tgread_object()
_max_id = reader.read_int()
if flags & 4:
_min_date = reader.tgread_date()
else:
_min_date = None
if flags & 8:
_max_date = reader.tgread_date()
else:
_max_date = None
return cls(peer=_peer, max_id=_max_id, just_clear=_just_clear, revoke=_revoke, min_date=_min_date, max_date=_max_date)
class DeleteMessagesRequest(TLRequest):
CONSTRUCTOR_ID = 0xe58e95d2
SUBCLASS_OF_ID = 0xced3c06e
# noinspection PyShadowingBuiltins
def __init__(self, id: List[int], revoke: Optional[bool]=None):
"""
:returns messages.AffectedMessages: Instance of AffectedMessages.
"""
self.id = id
self.revoke = revoke
def to_dict(self):
return {
'_': 'DeleteMessagesRequest',
'id': [] if self.id is None else self.id[:],
'revoke': self.revoke
}
def _bytes(self):
return b''.join((
b'\xd2\x95\x8e\xe5',
struct.pack('<I', (0 if self.revoke is None or self.revoke is False else 1)),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<i', x) for x in self.id),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_revoke = bool(flags & 1)
reader.read_int()
_id = []
for _ in range(reader.read_int()):
_x = reader.read_int()
_id.append(_x)
return cls(id=_id, revoke=_revoke)
class DeletePhoneCallHistoryRequest(TLRequest):
CONSTRUCTOR_ID = 0xf9cbe409
SUBCLASS_OF_ID = 0xf817652e
def __init__(self, revoke: Optional[bool]=None):
"""
:returns messages.AffectedFoundMessages: Instance of AffectedFoundMessages.
"""
self.revoke = revoke
def to_dict(self):
return {
'_': 'DeletePhoneCallHistoryRequest',
'revoke': self.revoke
}
def _bytes(self):
return b''.join((
b'\t\xe4\xcb\xf9',
struct.pack('<I', (0 if self.revoke is None or self.revoke is False else 1)),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_revoke = bool(flags & 1)
return cls(revoke=_revoke)
class DeleteRevokedExportedChatInvitesRequest(TLRequest):
CONSTRUCTOR_ID = 0x56987bd5
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, peer: 'TypeInputPeer', admin_id: 'TypeInputUser'):
"""
:returns Bool: This type has no constructors.
"""
self.peer = peer
self.admin_id = admin_id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
self.admin_id = utils.get_input_user(await client.get_input_entity(self.admin_id))
def to_dict(self):
return {
'_': 'DeleteRevokedExportedChatInvitesRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'admin_id': self.admin_id.to_dict() if isinstance(self.admin_id, TLObject) else self.admin_id
}
def _bytes(self):
return b''.join((
b'\xd5{\x98V',
self.peer._bytes(),
self.admin_id._bytes(),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_admin_id = reader.tgread_object()
return cls(peer=_peer, admin_id=_admin_id)
class DeleteScheduledMessagesRequest(TLRequest):
CONSTRUCTOR_ID = 0x59ae2b16
SUBCLASS_OF_ID = 0x8af52aac
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', id: List[int]):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.id = id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'DeleteScheduledMessagesRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'id': [] if self.id is None else self.id[:]
}
def _bytes(self):
return b''.join((
b'\x16+\xaeY',
self.peer._bytes(),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<i', x) for x in self.id),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
reader.read_int()
_id = []
for _ in range(reader.read_int()):
_x = reader.read_int()
_id.append(_x)
return cls(peer=_peer, id=_id)
class DiscardEncryptionRequest(TLRequest):
CONSTRUCTOR_ID = 0xf393aea0
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, chat_id: int, delete_history: Optional[bool]=None):
"""
:returns Bool: This type has no constructors.
"""
self.chat_id = chat_id
self.delete_history = delete_history
def to_dict(self):
return {
'_': 'DiscardEncryptionRequest',
'chat_id': self.chat_id,
'delete_history': self.delete_history
}
def _bytes(self):
return b''.join((
b'\xa0\xae\x93\xf3',
struct.pack('<I', (0 if self.delete_history is None or self.delete_history is False else 1)),
struct.pack('<i', self.chat_id),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_delete_history = bool(flags & 1)
_chat_id = reader.read_int()
return cls(chat_id=_chat_id, delete_history=_delete_history)
class EditChatAboutRequest(TLRequest):
CONSTRUCTOR_ID = 0xdef60797
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, peer: 'TypeInputPeer', about: str):
"""
:returns Bool: This type has no constructors.
"""
self.peer = peer
self.about = about
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'EditChatAboutRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'about': self.about
}
def _bytes(self):
return b''.join((
b'\x97\x07\xf6\xde',
self.peer._bytes(),
self.serialize_bytes(self.about),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_about = reader.tgread_string()
return cls(peer=_peer, about=_about)
class EditChatAdminRequest(TLRequest):
CONSTRUCTOR_ID = 0xa85bd1c2
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, chat_id: int, user_id: 'TypeInputUser', is_admin: bool):
"""
:returns Bool: This type has no constructors.
"""
self.chat_id = chat_id
self.user_id = user_id
self.is_admin = is_admin
async def resolve(self, client, utils):
self.user_id = utils.get_input_user(await client.get_input_entity(self.user_id))
def to_dict(self):
return {
'_': 'EditChatAdminRequest',
'chat_id': self.chat_id,
'user_id': self.user_id.to_dict() if isinstance(self.user_id, TLObject) else self.user_id,
'is_admin': self.is_admin
}
def _bytes(self):
return b''.join((
b'\xc2\xd1[\xa8',
struct.pack('<q', self.chat_id),
self.user_id._bytes(),
b'\xb5ur\x99' if self.is_admin else b'7\x97y\xbc',
))
@classmethod
def from_reader(cls, reader):
_chat_id = reader.read_long()
_user_id = reader.tgread_object()
_is_admin = reader.tgread_bool()
return cls(chat_id=_chat_id, user_id=_user_id, is_admin=_is_admin)
class EditChatDefaultBannedRightsRequest(TLRequest):
CONSTRUCTOR_ID = 0xa5866b41
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, peer: 'TypeInputPeer', banned_rights: 'TypeChatBannedRights'):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.banned_rights = banned_rights
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'EditChatDefaultBannedRightsRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'banned_rights': self.banned_rights.to_dict() if isinstance(self.banned_rights, TLObject) else self.banned_rights
}
def _bytes(self):
return b''.join((
b'Ak\x86\xa5',
self.peer._bytes(),
self.banned_rights._bytes(),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_banned_rights = reader.tgread_object()
return cls(peer=_peer, banned_rights=_banned_rights)
class EditChatPhotoRequest(TLRequest):
CONSTRUCTOR_ID = 0x35ddd674
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, chat_id: int, photo: 'TypeInputChatPhoto'):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.chat_id = chat_id
self.photo = photo
async def resolve(self, client, utils):
self.photo = utils.get_input_chat_photo(self.photo)
def to_dict(self):
return {
'_': 'EditChatPhotoRequest',
'chat_id': self.chat_id,
'photo': self.photo.to_dict() if isinstance(self.photo, TLObject) else self.photo
}
def _bytes(self):
return b''.join((
b't\xd6\xdd5',
struct.pack('<q', self.chat_id),
self.photo._bytes(),
))
@classmethod
def from_reader(cls, reader):
_chat_id = reader.read_long()
_photo = reader.tgread_object()
return cls(chat_id=_chat_id, photo=_photo)
class EditChatTitleRequest(TLRequest):
CONSTRUCTOR_ID = 0x73783ffd
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, chat_id: int, title: str):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.chat_id = chat_id
self.title = title
def to_dict(self):
return {
'_': 'EditChatTitleRequest',
'chat_id': self.chat_id,
'title': self.title
}
def _bytes(self):
return b''.join((
b'\xfd?xs',
struct.pack('<q', self.chat_id),
self.serialize_bytes(self.title),
))
@classmethod
def from_reader(cls, reader):
_chat_id = reader.read_long()
_title = reader.tgread_string()
return cls(chat_id=_chat_id, title=_title)
class EditExportedChatInviteRequest(TLRequest):
CONSTRUCTOR_ID = 0xbdca2f75
SUBCLASS_OF_ID = 0x82dcd4ca
def __init__(self, peer: 'TypeInputPeer', link: str, revoked: Optional[bool]=None, expire_date: Optional[datetime]=None, usage_limit: Optional[int]=None, request_needed: Optional[bool]=None, title: Optional[str]=None):
"""
:returns messages.ExportedChatInvite: Instance of either ExportedChatInvite, ExportedChatInviteReplaced.
"""
self.peer = peer
self.link = link
self.revoked = revoked
self.expire_date = expire_date
self.usage_limit = usage_limit
self.request_needed = request_needed
self.title = title
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'EditExportedChatInviteRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'link': self.link,
'revoked': self.revoked,
'expire_date': self.expire_date,
'usage_limit': self.usage_limit,
'request_needed': self.request_needed,
'title': self.title
}
def _bytes(self):
return b''.join((
b'u/\xca\xbd',
struct.pack('<I', (0 if self.revoked is None or self.revoked is False else 4) | (0 if self.expire_date is None or self.expire_date is False else 1) | (0 if self.usage_limit is None or self.usage_limit is False else 2) | (0 if self.request_needed is None else 8) | (0 if self.title is None or self.title is False else 16)),
self.peer._bytes(),
self.serialize_bytes(self.link),
b'' if self.expire_date is None or self.expire_date is False else (self.serialize_datetime(self.expire_date)),
b'' if self.usage_limit is None or self.usage_limit is False else (struct.pack('<i', self.usage_limit)),
b'' if self.request_needed is None else (b'\xb5ur\x99' if self.request_needed else b'7\x97y\xbc'),
b'' if self.title is None or self.title is False else (self.serialize_bytes(self.title)),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_revoked = bool(flags & 4)
_peer = reader.tgread_object()
_link = reader.tgread_string()
if flags & 1:
_expire_date = reader.tgread_date()
else:
_expire_date = None
if flags & 2:
_usage_limit = reader.read_int()
else:
_usage_limit = None
if flags & 8:
_request_needed = reader.tgread_bool()
else:
_request_needed = None
if flags & 16:
_title = reader.tgread_string()
else:
_title = None
return cls(peer=_peer, link=_link, revoked=_revoked, expire_date=_expire_date, usage_limit=_usage_limit, request_needed=_request_needed, title=_title)
class EditInlineBotMessageRequest(TLRequest):
CONSTRUCTOR_ID = 0x83557dba
SUBCLASS_OF_ID = 0xf5b399ac
# noinspection PyShadowingBuiltins
def __init__(self, id: 'TypeInputBotInlineMessageID', no_webpage: Optional[bool]=None, message: Optional[str]=None, media: Optional['TypeInputMedia']=None, reply_markup: Optional['TypeReplyMarkup']=None, entities: Optional[List['TypeMessageEntity']]=None):
"""
:returns Bool: This type has no constructors.
"""
self.id = id
self.no_webpage = no_webpage
self.message = message
self.media = media
self.reply_markup = reply_markup
self.entities = entities
async def resolve(self, client, utils):
if self.media:
self.media = utils.get_input_media(self.media)
def to_dict(self):
return {
'_': 'EditInlineBotMessageRequest',
'id': self.id.to_dict() if isinstance(self.id, TLObject) else self.id,
'no_webpage': self.no_webpage,
'message': self.message,
'media': self.media.to_dict() if isinstance(self.media, TLObject) else self.media,
'reply_markup': self.reply_markup.to_dict() if isinstance(self.reply_markup, TLObject) else self.reply_markup,
'entities': [] if self.entities is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.entities]
}
def _bytes(self):
return b''.join((
b'\xba}U\x83',
struct.pack('<I', (0 if self.no_webpage is None or self.no_webpage is False else 2) | (0 if self.message is None or self.message is False else 2048) | (0 if self.media is None or self.media is False else 16384) | (0 if self.reply_markup is None or self.reply_markup is False else 4) | (0 if self.entities is None or self.entities is False else 8)),
self.id._bytes(),
b'' if self.message is None or self.message is False else (self.serialize_bytes(self.message)),
b'' if self.media is None or self.media is False else (self.media._bytes()),
b'' if self.reply_markup is None or self.reply_markup is False else (self.reply_markup._bytes()),
b'' if self.entities is None or self.entities is False else b''.join((b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.entities)),b''.join(x._bytes() for x in self.entities))),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_no_webpage = bool(flags & 2)
_id = reader.tgread_object()
if flags & 2048:
_message = reader.tgread_string()
else:
_message = None
if flags & 16384:
_media = reader.tgread_object()
else:
_media = None
if flags & 4:
_reply_markup = reader.tgread_object()
else:
_reply_markup = None
if flags & 8:
reader.read_int()
_entities = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_entities.append(_x)
else:
_entities = None
return cls(id=_id, no_webpage=_no_webpage, message=_message, media=_media, reply_markup=_reply_markup, entities=_entities)
class EditMessageRequest(TLRequest):
CONSTRUCTOR_ID = 0x48f71778
SUBCLASS_OF_ID = 0x8af52aac
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', id: int, no_webpage: Optional[bool]=None, message: Optional[str]=None, media: Optional['TypeInputMedia']=None, reply_markup: Optional['TypeReplyMarkup']=None, entities: Optional[List['TypeMessageEntity']]=None, schedule_date: Optional[datetime]=None):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.id = id
self.no_webpage = no_webpage
self.message = message
self.media = media
self.reply_markup = reply_markup
self.entities = entities
self.schedule_date = schedule_date
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
if self.media:
self.media = utils.get_input_media(self.media)
def to_dict(self):
return {
'_': 'EditMessageRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'id': self.id,
'no_webpage': self.no_webpage,
'message': self.message,
'media': self.media.to_dict() if isinstance(self.media, TLObject) else self.media,
'reply_markup': self.reply_markup.to_dict() if isinstance(self.reply_markup, TLObject) else self.reply_markup,
'entities': [] if self.entities is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.entities],
'schedule_date': self.schedule_date
}
def _bytes(self):
return b''.join((
b'x\x17\xf7H',
struct.pack('<I', (0 if self.no_webpage is None or self.no_webpage is False else 2) | (0 if self.message is None or self.message is False else 2048) | (0 if self.media is None or self.media is False else 16384) | (0 if self.reply_markup is None or self.reply_markup is False else 4) | (0 if self.entities is None or self.entities is False else 8) | (0 if self.schedule_date is None or self.schedule_date is False else 32768)),
self.peer._bytes(),
struct.pack('<i', self.id),
b'' if self.message is None or self.message is False else (self.serialize_bytes(self.message)),
b'' if self.media is None or self.media is False else (self.media._bytes()),
b'' if self.reply_markup is None or self.reply_markup is False else (self.reply_markup._bytes()),
b'' if self.entities is None or self.entities is False else b''.join((b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.entities)),b''.join(x._bytes() for x in self.entities))),
b'' if self.schedule_date is None or self.schedule_date is False else (self.serialize_datetime(self.schedule_date)),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_no_webpage = bool(flags & 2)
_peer = reader.tgread_object()
_id = reader.read_int()
if flags & 2048:
_message = reader.tgread_string()
else:
_message = None
if flags & 16384:
_media = reader.tgread_object()
else:
_media = None
if flags & 4:
_reply_markup = reader.tgread_object()
else:
_reply_markup = None
if flags & 8:
reader.read_int()
_entities = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_entities.append(_x)
else:
_entities = None
if flags & 32768:
_schedule_date = reader.tgread_date()
else:
_schedule_date = None
return cls(peer=_peer, id=_id, no_webpage=_no_webpage, message=_message, media=_media, reply_markup=_reply_markup, entities=_entities, schedule_date=_schedule_date)
class ExportChatInviteRequest(TLRequest):
CONSTRUCTOR_ID = 0xa02ce5d5
SUBCLASS_OF_ID = 0xb4748a58
def __init__(self, peer: 'TypeInputPeer', legacy_revoke_permanent: Optional[bool]=None, request_needed: Optional[bool]=None, expire_date: Optional[datetime]=None, usage_limit: Optional[int]=None, title: Optional[str]=None):
"""
:returns ExportedChatInvite: Instance of either ChatInviteExported, ChatInvitePublicJoinRequests.
"""
self.peer = peer
self.legacy_revoke_permanent = legacy_revoke_permanent
self.request_needed = request_needed
self.expire_date = expire_date
self.usage_limit = usage_limit
self.title = title
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'ExportChatInviteRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'legacy_revoke_permanent': self.legacy_revoke_permanent,
'request_needed': self.request_needed,
'expire_date': self.expire_date,
'usage_limit': self.usage_limit,
'title': self.title
}
def _bytes(self):
return b''.join((
b'\xd5\xe5,\xa0',
struct.pack('<I', (0 if self.legacy_revoke_permanent is None or self.legacy_revoke_permanent is False else 4) | (0 if self.request_needed is None or self.request_needed is False else 8) | (0 if self.expire_date is None or self.expire_date is False else 1) | (0 if self.usage_limit is None or self.usage_limit is False else 2) | (0 if self.title is None or self.title is False else 16)),
self.peer._bytes(),
b'' if self.expire_date is None or self.expire_date is False else (self.serialize_datetime(self.expire_date)),
b'' if self.usage_limit is None or self.usage_limit is False else (struct.pack('<i', self.usage_limit)),
b'' if self.title is None or self.title is False else (self.serialize_bytes(self.title)),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_legacy_revoke_permanent = bool(flags & 4)
_request_needed = bool(flags & 8)
_peer = reader.tgread_object()
if flags & 1:
_expire_date = reader.tgread_date()
else:
_expire_date = None
if flags & 2:
_usage_limit = reader.read_int()
else:
_usage_limit = None
if flags & 16:
_title = reader.tgread_string()
else:
_title = None
return cls(peer=_peer, legacy_revoke_permanent=_legacy_revoke_permanent, request_needed=_request_needed, expire_date=_expire_date, usage_limit=_usage_limit, title=_title)
class FaveStickerRequest(TLRequest):
CONSTRUCTOR_ID = 0xb9ffc55b
SUBCLASS_OF_ID = 0xf5b399ac
# noinspection PyShadowingBuiltins
def __init__(self, id: 'TypeInputDocument', unfave: bool):
"""
:returns Bool: This type has no constructors.
"""
self.id = id
self.unfave = unfave
async def resolve(self, client, utils):
self.id = utils.get_input_document(self.id)
def to_dict(self):
return {
'_': 'FaveStickerRequest',
'id': self.id.to_dict() if isinstance(self.id, TLObject) else self.id,
'unfave': self.unfave
}
def _bytes(self):
return b''.join((
b'[\xc5\xff\xb9',
self.id._bytes(),
b'\xb5ur\x99' if self.unfave else b'7\x97y\xbc',
))
@classmethod
def from_reader(cls, reader):
_id = reader.tgread_object()
_unfave = reader.tgread_bool()
return cls(id=_id, unfave=_unfave)
class ForwardMessagesRequest(TLRequest):
CONSTRUCTOR_ID = 0xc661bbc4
SUBCLASS_OF_ID = 0x8af52aac
# noinspection PyShadowingBuiltins
def __init__(self, from_peer: 'TypeInputPeer', id: List[int], to_peer: 'TypeInputPeer', silent: Optional[bool]=None, background: Optional[bool]=None, with_my_score: Optional[bool]=None, drop_author: Optional[bool]=None, drop_media_captions: Optional[bool]=None, noforwards: Optional[bool]=None, random_id: List[int]=None, top_msg_id: Optional[int]=None, schedule_date: Optional[datetime]=None, send_as: Optional['TypeInputPeer']=None):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.from_peer = from_peer
self.id = id
self.to_peer = to_peer
self.silent = silent
self.background = background
self.with_my_score = with_my_score
self.drop_author = drop_author
self.drop_media_captions = drop_media_captions
self.noforwards = noforwards
self.random_id = random_id if random_id is not None else [int.from_bytes(os.urandom(8), 'big', signed=True) for _ in range(len(id))]
self.top_msg_id = top_msg_id
self.schedule_date = schedule_date
self.send_as = send_as
async def resolve(self, client, utils):
self.from_peer = utils.get_input_peer(await client.get_input_entity(self.from_peer))
self.to_peer = utils.get_input_peer(await client.get_input_entity(self.to_peer))
if self.send_as:
self.send_as = utils.get_input_peer(await client.get_input_entity(self.send_as))
def to_dict(self):
return {
'_': 'ForwardMessagesRequest',
'from_peer': self.from_peer.to_dict() if isinstance(self.from_peer, TLObject) else self.from_peer,
'id': [] if self.id is None else self.id[:],
'to_peer': self.to_peer.to_dict() if isinstance(self.to_peer, TLObject) else self.to_peer,
'silent': self.silent,
'background': self.background,
'with_my_score': self.with_my_score,
'drop_author': self.drop_author,
'drop_media_captions': self.drop_media_captions,
'noforwards': self.noforwards,
'random_id': [] if self.random_id is None else self.random_id[:],
'top_msg_id': self.top_msg_id,
'schedule_date': self.schedule_date,
'send_as': self.send_as.to_dict() if isinstance(self.send_as, TLObject) else self.send_as
}
def _bytes(self):
return b''.join((
b'\xc4\xbba\xc6',
struct.pack('<I', (0 if self.silent is None or self.silent is False else 32) | (0 if self.background is None or self.background is False else 64) | (0 if self.with_my_score is None or self.with_my_score is False else 256) | (0 if self.drop_author is None or self.drop_author is False else 2048) | (0 if self.drop_media_captions is None or self.drop_media_captions is False else 4096) | (0 if self.noforwards is None or self.noforwards is False else 16384) | (0 if self.top_msg_id is None or self.top_msg_id is False else 512) | (0 if self.schedule_date is None or self.schedule_date is False else 1024) | (0 if self.send_as is None or self.send_as is False else 8192)),
self.from_peer._bytes(),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<i', x) for x in self.id),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.random_id)),b''.join(struct.pack('<q', x) for x in self.random_id),
self.to_peer._bytes(),
b'' if self.top_msg_id is None or self.top_msg_id is False else (struct.pack('<i', self.top_msg_id)),
b'' if self.schedule_date is None or self.schedule_date is False else (self.serialize_datetime(self.schedule_date)),
b'' if self.send_as is None or self.send_as is False else (self.send_as._bytes()),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_silent = bool(flags & 32)
_background = bool(flags & 64)
_with_my_score = bool(flags & 256)
_drop_author = bool(flags & 2048)
_drop_media_captions = bool(flags & 4096)
_noforwards = bool(flags & 16384)
_from_peer = reader.tgread_object()
reader.read_int()
_id = []
for _ in range(reader.read_int()):
_x = reader.read_int()
_id.append(_x)
reader.read_int()
_random_id = []
for _ in range(reader.read_int()):
_x = reader.read_long()
_random_id.append(_x)
_to_peer = reader.tgread_object()
if flags & 512:
_top_msg_id = reader.read_int()
else:
_top_msg_id = None
if flags & 1024:
_schedule_date = reader.tgread_date()
else:
_schedule_date = None
if flags & 8192:
_send_as = reader.tgread_object()
else:
_send_as = None
return cls(from_peer=_from_peer, id=_id, to_peer=_to_peer, silent=_silent, background=_background, with_my_score=_with_my_score, drop_author=_drop_author, drop_media_captions=_drop_media_captions, noforwards=_noforwards, random_id=_random_id, top_msg_id=_top_msg_id, schedule_date=_schedule_date, send_as=_send_as)
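
# --- Usage sketch (illustrative; not part of the generated code) ---
# Like every TLRequest in this module, the request above is normally sent by
# awaiting it through a connected client rather than being serialized by hand,
# e.g. (client, peers and message ids below are placeholders):
#
#   await client(ForwardMessagesRequest(
#       from_peer='source_chat', id=[1, 2, 3], to_peer='target_chat'))
#
# The client calls resolve(), which turns the plain peers into InputPeer
# objects, before serializing the request with _bytes().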
class GetAdminsWithInvitesRequest(TLRequest):
CONSTRUCTOR_ID = 0x3920e6ef
SUBCLASS_OF_ID = 0x8f5bad2b
def __init__(self, peer: 'TypeInputPeer'):
"""
:returns messages.ChatAdminsWithInvites: Instance of ChatAdminsWithInvites.
"""
self.peer = peer
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetAdminsWithInvitesRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer
}
def _bytes(self):
return b''.join((
b'\xef\xe6 9',
self.peer._bytes(),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
return cls(peer=_peer)
class GetAllChatsRequest(TLRequest):
CONSTRUCTOR_ID = 0x875f74be
SUBCLASS_OF_ID = 0x99d5cb14
def __init__(self, except_ids: List[int]):
"""
:returns messages.Chats: Instance of either Chats, ChatsSlice.
"""
self.except_ids = except_ids
def to_dict(self):
return {
'_': 'GetAllChatsRequest',
'except_ids': [] if self.except_ids is None else self.except_ids[:]
}
def _bytes(self):
return b''.join((
b'\xbet_\x87',
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.except_ids)),b''.join(struct.pack('<q', x) for x in self.except_ids),
))
@classmethod
def from_reader(cls, reader):
reader.read_int()
_except_ids = []
for _ in range(reader.read_int()):
_x = reader.read_long()
_except_ids.append(_x)
return cls(except_ids=_except_ids)
class GetAllDraftsRequest(TLRequest):
CONSTRUCTOR_ID = 0x6a3f8d65
SUBCLASS_OF_ID = 0x8af52aac
def to_dict(self):
return {
'_': 'GetAllDraftsRequest'
}
def _bytes(self):
return b''.join((
b'e\x8d?j',
))
@classmethod
def from_reader(cls, reader):
return cls()
class GetAllStickersRequest(TLRequest):
CONSTRUCTOR_ID = 0xb8a0a1a8
SUBCLASS_OF_ID = 0x45834829
# noinspection PyShadowingBuiltins
def __init__(self, hash: int):
"""
:returns messages.AllStickers: Instance of either AllStickersNotModified, AllStickers.
"""
self.hash = hash
def to_dict(self):
return {
'_': 'GetAllStickersRequest',
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\xa8\xa1\xa0\xb8',
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.read_long()
return cls(hash=_hash)
class GetArchivedStickersRequest(TLRequest):
CONSTRUCTOR_ID = 0x57f17692
SUBCLASS_OF_ID = 0x7296d771
def __init__(self, offset_id: int, limit: int, masks: Optional[bool]=None, emojis: Optional[bool]=None):
"""
:returns messages.ArchivedStickers: Instance of ArchivedStickers.
"""
self.offset_id = offset_id
self.limit = limit
self.masks = masks
self.emojis = emojis
def to_dict(self):
return {
'_': 'GetArchivedStickersRequest',
'offset_id': self.offset_id,
'limit': self.limit,
'masks': self.masks,
'emojis': self.emojis
}
def _bytes(self):
return b''.join((
b'\x92v\xf1W',
struct.pack('<I', (0 if self.masks is None or self.masks is False else 1) | (0 if self.emojis is None or self.emojis is False else 2)),
struct.pack('<q', self.offset_id),
struct.pack('<i', self.limit),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_masks = bool(flags & 1)
_emojis = bool(flags & 2)
_offset_id = reader.read_long()
_limit = reader.read_int()
return cls(offset_id=_offset_id, limit=_limit, masks=_masks, emojis=_emojis)
class GetAttachMenuBotRequest(TLRequest):
CONSTRUCTOR_ID = 0x77216192
SUBCLASS_OF_ID = 0xdb33883d
def __init__(self, bot: 'TypeInputUser'):
"""
:returns AttachMenuBotsBot: Instance of AttachMenuBotsBot.
"""
self.bot = bot
async def resolve(self, client, utils):
self.bot = utils.get_input_user(await client.get_input_entity(self.bot))
def to_dict(self):
return {
'_': 'GetAttachMenuBotRequest',
'bot': self.bot.to_dict() if isinstance(self.bot, TLObject) else self.bot
}
def _bytes(self):
return b''.join((
b'\x92a!w',
self.bot._bytes(),
))
@classmethod
def from_reader(cls, reader):
_bot = reader.tgread_object()
return cls(bot=_bot)
class GetAttachMenuBotsRequest(TLRequest):
CONSTRUCTOR_ID = 0x16fcc2cb
SUBCLASS_OF_ID = 0x842e23da
# noinspection PyShadowingBuiltins
def __init__(self, hash: int):
"""
:returns AttachMenuBots: Instance of either AttachMenuBotsNotModified, AttachMenuBots.
"""
self.hash = hash
def to_dict(self):
return {
'_': 'GetAttachMenuBotsRequest',
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\xcb\xc2\xfc\x16',
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.read_long()
return cls(hash=_hash)
class GetAttachedStickersRequest(TLRequest):
CONSTRUCTOR_ID = 0xcc5b67cc
SUBCLASS_OF_ID = 0xcc125f6b
def __init__(self, media: 'TypeInputStickeredMedia'):
"""
:returns Vector<StickerSetCovered>: This type has no constructors.
"""
self.media = media
def to_dict(self):
return {
'_': 'GetAttachedStickersRequest',
'media': self.media.to_dict() if isinstance(self.media, TLObject) else self.media
}
def _bytes(self):
return b''.join((
b'\xccg[\xcc',
self.media._bytes(),
))
@classmethod
def from_reader(cls, reader):
_media = reader.tgread_object()
return cls(media=_media)
class GetAvailableReactionsRequest(TLRequest):
CONSTRUCTOR_ID = 0x18dea0ac
SUBCLASS_OF_ID = 0xe426ad82
# noinspection PyShadowingBuiltins
def __init__(self, hash: int):
"""
:returns messages.AvailableReactions: Instance of either AvailableReactionsNotModified, AvailableReactions.
"""
self.hash = hash
def to_dict(self):
return {
'_': 'GetAvailableReactionsRequest',
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\xac\xa0\xde\x18',
struct.pack('<i', self.hash),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.read_int()
return cls(hash=_hash)
class GetBotAppRequest(TLRequest):
CONSTRUCTOR_ID = 0x34fdc5c3
SUBCLASS_OF_ID = 0x8f7243a7
# noinspection PyShadowingBuiltins
def __init__(self, app: 'TypeInputBotApp', hash: int):
"""
:returns messages.BotApp: Instance of BotApp.
"""
self.app = app
self.hash = hash
def to_dict(self):
return {
'_': 'GetBotAppRequest',
'app': self.app.to_dict() if isinstance(self.app, TLObject) else self.app,
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\xc3\xc5\xfd4',
self.app._bytes(),
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
_app = reader.tgread_object()
_hash = reader.read_long()
return cls(app=_app, hash=_hash)
class GetBotCallbackAnswerRequest(TLRequest):
CONSTRUCTOR_ID = 0x9342ca07
SUBCLASS_OF_ID = 0x6c4dd18c
def __init__(self, peer: 'TypeInputPeer', msg_id: int, game: Optional[bool]=None, data: Optional[bytes]=None, password: Optional['TypeInputCheckPasswordSRP']=None):
"""
:returns messages.BotCallbackAnswer: Instance of BotCallbackAnswer.
"""
self.peer = peer
self.msg_id = msg_id
self.game = game
self.data = data
self.password = password
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetBotCallbackAnswerRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'msg_id': self.msg_id,
'game': self.game,
'data': self.data,
'password': self.password.to_dict() if isinstance(self.password, TLObject) else self.password
}
def _bytes(self):
return b''.join((
b'\x07\xcaB\x93',
struct.pack('<I', (0 if self.game is None or self.game is False else 2) | (0 if self.data is None or self.data is False else 1) | (0 if self.password is None or self.password is False else 4)),
self.peer._bytes(),
struct.pack('<i', self.msg_id),
b'' if self.data is None or self.data is False else (self.serialize_bytes(self.data)),
b'' if self.password is None or self.password is False else (self.password._bytes()),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_game = bool(flags & 2)
_peer = reader.tgread_object()
_msg_id = reader.read_int()
if flags & 1:
_data = reader.tgread_bytes()
else:
_data = None
if flags & 4:
_password = reader.tgread_object()
else:
_password = None
return cls(peer=_peer, msg_id=_msg_id, game=_game, data=_data, password=_password)
class GetChatInviteImportersRequest(TLRequest):
CONSTRUCTOR_ID = 0xdf04dd4e
SUBCLASS_OF_ID = 0xd9bc8aa6
def __init__(self, peer: 'TypeInputPeer', offset_date: Optional[datetime], offset_user: 'TypeInputUser', limit: int, requested: Optional[bool]=None, link: Optional[str]=None, q: Optional[str]=None):
"""
:returns messages.ChatInviteImporters: Instance of ChatInviteImporters.
"""
self.peer = peer
self.offset_date = offset_date
self.offset_user = offset_user
self.limit = limit
self.requested = requested
self.link = link
self.q = q
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
self.offset_user = utils.get_input_user(await client.get_input_entity(self.offset_user))
def to_dict(self):
return {
'_': 'GetChatInviteImportersRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'offset_date': self.offset_date,
'offset_user': self.offset_user.to_dict() if isinstance(self.offset_user, TLObject) else self.offset_user,
'limit': self.limit,
'requested': self.requested,
'link': self.link,
'q': self.q
}
def _bytes(self):
return b''.join((
b'N\xdd\x04\xdf',
struct.pack('<I', (0 if self.requested is None or self.requested is False else 1) | (0 if self.link is None or self.link is False else 2) | (0 if self.q is None or self.q is False else 4)),
self.peer._bytes(),
b'' if self.link is None or self.link is False else (self.serialize_bytes(self.link)),
b'' if self.q is None or self.q is False else (self.serialize_bytes(self.q)),
self.serialize_datetime(self.offset_date),
self.offset_user._bytes(),
struct.pack('<i', self.limit),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_requested = bool(flags & 1)
_peer = reader.tgread_object()
if flags & 2:
_link = reader.tgread_string()
else:
_link = None
if flags & 4:
_q = reader.tgread_string()
else:
_q = None
_offset_date = reader.tgread_date()
_offset_user = reader.tgread_object()
_limit = reader.read_int()
return cls(peer=_peer, offset_date=_offset_date, offset_user=_offset_user, limit=_limit, requested=_requested, link=_link, q=_q)
class GetChatsRequest(TLRequest):
CONSTRUCTOR_ID = 0x49e9528f
SUBCLASS_OF_ID = 0x99d5cb14
# noinspection PyShadowingBuiltins
def __init__(self, id: List[int]):
"""
:returns messages.Chats: Instance of either Chats, ChatsSlice.
"""
self.id = id
def to_dict(self):
return {
'_': 'GetChatsRequest',
'id': [] if self.id is None else self.id[:]
}
def _bytes(self):
return b''.join((
b'\x8fR\xe9I',
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<q', x) for x in self.id),
))
@classmethod
def from_reader(cls, reader):
reader.read_int()
_id = []
for _ in range(reader.read_int()):
_x = reader.read_long()
_id.append(_x)
return cls(id=_id)
class GetCommonChatsRequest(TLRequest):
CONSTRUCTOR_ID = 0xe40ca104
SUBCLASS_OF_ID = 0x99d5cb14
def __init__(self, user_id: 'TypeInputUser', max_id: int, limit: int):
"""
:returns messages.Chats: Instance of either Chats, ChatsSlice.
"""
self.user_id = user_id
self.max_id = max_id
self.limit = limit
async def resolve(self, client, utils):
self.user_id = utils.get_input_user(await client.get_input_entity(self.user_id))
def to_dict(self):
return {
'_': 'GetCommonChatsRequest',
'user_id': self.user_id.to_dict() if isinstance(self.user_id, TLObject) else self.user_id,
'max_id': self.max_id,
'limit': self.limit
}
def _bytes(self):
return b''.join((
b'\x04\xa1\x0c\xe4',
self.user_id._bytes(),
struct.pack('<q', self.max_id),
struct.pack('<i', self.limit),
))
@classmethod
def from_reader(cls, reader):
_user_id = reader.tgread_object()
_max_id = reader.read_long()
_limit = reader.read_int()
return cls(user_id=_user_id, max_id=_max_id, limit=_limit)
class GetCustomEmojiDocumentsRequest(TLRequest):
CONSTRUCTOR_ID = 0xd9ab0f54
SUBCLASS_OF_ID = 0xcc590e08
def __init__(self, document_id: List[int]):
"""
:returns Vector<Document>: This type has no constructors.
"""
self.document_id = document_id
def to_dict(self):
return {
'_': 'GetCustomEmojiDocumentsRequest',
'document_id': [] if self.document_id is None else self.document_id[:]
}
def _bytes(self):
return b''.join((
b'T\x0f\xab\xd9',
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.document_id)),b''.join(struct.pack('<q', x) for x in self.document_id),
))
@classmethod
def from_reader(cls, reader):
reader.read_int()
_document_id = []
for _ in range(reader.read_int()):
_x = reader.read_long()
_document_id.append(_x)
return cls(document_id=_document_id)
class GetDefaultHistoryTTLRequest(TLRequest):
CONSTRUCTOR_ID = 0x658b7188
SUBCLASS_OF_ID = 0xf00d3367
def to_dict(self):
return {
'_': 'GetDefaultHistoryTTLRequest'
}
def _bytes(self):
return b''.join((
b'\x88q\x8be',
))
@classmethod
def from_reader(cls, reader):
return cls()
class GetDhConfigRequest(TLRequest):
CONSTRUCTOR_ID = 0x26cf8950
SUBCLASS_OF_ID = 0xe488ed8b
def __init__(self, version: int, random_length: int):
"""
:returns messages.DhConfig: Instance of either DhConfigNotModified, DhConfig.
"""
self.version = version
self.random_length = random_length
def to_dict(self):
return {
'_': 'GetDhConfigRequest',
'version': self.version,
'random_length': self.random_length
}
def _bytes(self):
return b''.join((
b'P\x89\xcf&',
struct.pack('<i', self.version),
struct.pack('<i', self.random_length),
))
@classmethod
def from_reader(cls, reader):
_version = reader.read_int()
_random_length = reader.read_int()
return cls(version=_version, random_length=_random_length)
class GetDialogFiltersRequest(TLRequest):
CONSTRUCTOR_ID = 0xf19ed96d
SUBCLASS_OF_ID = 0x601ce94d
def to_dict(self):
return {
'_': 'GetDialogFiltersRequest'
}
def _bytes(self):
return b''.join((
b'm\xd9\x9e\xf1',
))
@classmethod
def from_reader(cls, reader):
return cls()
class GetDialogUnreadMarksRequest(TLRequest):
CONSTRUCTOR_ID = 0x22e24e22
SUBCLASS_OF_ID = 0xbec64ad9
def to_dict(self):
return {
'_': 'GetDialogUnreadMarksRequest'
}
def _bytes(self):
return b''.join((
b'"N\xe2"',
))
@classmethod
def from_reader(cls, reader):
return cls()
class GetDialogsRequest(TLRequest):
CONSTRUCTOR_ID = 0xa0f4cb4f
SUBCLASS_OF_ID = 0xe1b52ee
# noinspection PyShadowingBuiltins
def __init__(self, offset_date: Optional[datetime], offset_id: int, offset_peer: 'TypeInputPeer', limit: int, hash: int, exclude_pinned: Optional[bool]=None, folder_id: Optional[int]=None):
"""
:returns messages.Dialogs: Instance of either Dialogs, DialogsSlice, DialogsNotModified.
"""
self.offset_date = offset_date
self.offset_id = offset_id
self.offset_peer = offset_peer
self.limit = limit
self.hash = hash
self.exclude_pinned = exclude_pinned
self.folder_id = folder_id
async def resolve(self, client, utils):
self.offset_peer = utils.get_input_peer(await client.get_input_entity(self.offset_peer))
def to_dict(self):
return {
'_': 'GetDialogsRequest',
'offset_date': self.offset_date,
'offset_id': self.offset_id,
'offset_peer': self.offset_peer.to_dict() if isinstance(self.offset_peer, TLObject) else self.offset_peer,
'limit': self.limit,
'hash': self.hash,
'exclude_pinned': self.exclude_pinned,
'folder_id': self.folder_id
}
def _bytes(self):
return b''.join((
b'O\xcb\xf4\xa0',
struct.pack('<I', (0 if self.exclude_pinned is None or self.exclude_pinned is False else 1) | (0 if self.folder_id is None or self.folder_id is False else 2)),
b'' if self.folder_id is None or self.folder_id is False else (struct.pack('<i', self.folder_id)),
self.serialize_datetime(self.offset_date),
struct.pack('<i', self.offset_id),
self.offset_peer._bytes(),
struct.pack('<i', self.limit),
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_exclude_pinned = bool(flags & 1)
if flags & 2:
_folder_id = reader.read_int()
else:
_folder_id = None
_offset_date = reader.tgread_date()
_offset_id = reader.read_int()
_offset_peer = reader.tgread_object()
_limit = reader.read_int()
_hash = reader.read_long()
return cls(offset_date=_offset_date, offset_id=_offset_id, offset_peer=_offset_peer, limit=_limit, hash=_hash, exclude_pinned=_exclude_pinned, folder_id=_folder_id)
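# Usage sketch (assumptions: a connected Telethon-style client and that
# InputPeerEmpty is importable from the library's types module; both are
# illustrative, not guaranteed by this file). Starting from an empty offset
# peer fetches the first page of dialogs:
#
#     from telethon.tl.types import InputPeerEmpty  # assumed import path
#     dialogs = await client(GetDialogsRequest(
#         offset_date=None, offset_id=0, offset_peer=InputPeerEmpty(),
#         limit=20, hash=0))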
class GetDiscussionMessageRequest(TLRequest):
CONSTRUCTOR_ID = 0x446972fd
SUBCLASS_OF_ID = 0x53f8e3e8
def __init__(self, peer: 'TypeInputPeer', msg_id: int):
"""
:returns messages.DiscussionMessage: Instance of DiscussionMessage.
"""
self.peer = peer
self.msg_id = msg_id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetDiscussionMessageRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'msg_id': self.msg_id
}
def _bytes(self):
return b''.join((
b'\xfdriD',
self.peer._bytes(),
struct.pack('<i', self.msg_id),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_msg_id = reader.read_int()
return cls(peer=_peer, msg_id=_msg_id)
class GetDocumentByHashRequest(TLRequest):
CONSTRUCTOR_ID = 0xb1f2061f
SUBCLASS_OF_ID = 0x211fe820
def __init__(self, sha256: bytes, size: int, mime_type: str):
"""
:returns Document: Instance of either DocumentEmpty, Document.
"""
self.sha256 = sha256
self.size = size
self.mime_type = mime_type
def to_dict(self):
return {
'_': 'GetDocumentByHashRequest',
'sha256': self.sha256,
'size': self.size,
'mime_type': self.mime_type
}
def _bytes(self):
return b''.join((
b'\x1f\x06\xf2\xb1',
self.serialize_bytes(self.sha256),
struct.pack('<q', self.size),
self.serialize_bytes(self.mime_type),
))
@classmethod
def from_reader(cls, reader):
_sha256 = reader.tgread_bytes()
_size = reader.read_long()
_mime_type = reader.tgread_string()
return cls(sha256=_sha256, size=_size, mime_type=_mime_type)
class GetEmojiGroupsRequest(TLRequest):
CONSTRUCTOR_ID = 0x7488ce5b
SUBCLASS_OF_ID = 0x7eca55d9
# noinspection PyShadowingBuiltins
def __init__(self, hash: int):
"""
:returns messages.EmojiGroups: Instance of either EmojiGroupsNotModified, EmojiGroups.
"""
self.hash = hash
def to_dict(self):
return {
'_': 'GetEmojiGroupsRequest',
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'[\xce\x88t',
struct.pack('<i', self.hash),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.read_int()
return cls(hash=_hash)
class GetEmojiKeywordsRequest(TLRequest):
CONSTRUCTOR_ID = 0x35a0e062
SUBCLASS_OF_ID = 0xd279c672
def __init__(self, lang_code: str):
"""
:returns EmojiKeywordsDifference: Instance of EmojiKeywordsDifference.
"""
self.lang_code = lang_code
def to_dict(self):
return {
'_': 'GetEmojiKeywordsRequest',
'lang_code': self.lang_code
}
def _bytes(self):
return b''.join((
b'b\xe0\xa05',
self.serialize_bytes(self.lang_code),
))
@classmethod
def from_reader(cls, reader):
_lang_code = reader.tgread_string()
return cls(lang_code=_lang_code)
class GetEmojiKeywordsDifferenceRequest(TLRequest):
CONSTRUCTOR_ID = 0x1508b6af
SUBCLASS_OF_ID = 0xd279c672
def __init__(self, lang_code: str, from_version: int):
"""
:returns EmojiKeywordsDifference: Instance of EmojiKeywordsDifference.
"""
self.lang_code = lang_code
self.from_version = from_version
def to_dict(self):
return {
'_': 'GetEmojiKeywordsDifferenceRequest',
'lang_code': self.lang_code,
'from_version': self.from_version
}
def _bytes(self):
return b''.join((
b'\xaf\xb6\x08\x15',
self.serialize_bytes(self.lang_code),
struct.pack('<i', self.from_version),
))
@classmethod
def from_reader(cls, reader):
_lang_code = reader.tgread_string()
_from_version = reader.read_int()
return cls(lang_code=_lang_code, from_version=_from_version)
class GetEmojiKeywordsLanguagesRequest(TLRequest):
CONSTRUCTOR_ID = 0x4e9963b2
SUBCLASS_OF_ID = 0xe795d387
def __init__(self, lang_codes: List[str]):
"""
:returns Vector<EmojiLanguage>: This type has no constructors.
"""
self.lang_codes = lang_codes
def to_dict(self):
return {
'_': 'GetEmojiKeywordsLanguagesRequest',
'lang_codes': [] if self.lang_codes is None else self.lang_codes[:]
}
def _bytes(self):
return b''.join((
b'\xb2c\x99N',
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.lang_codes)),b''.join(self.serialize_bytes(x) for x in self.lang_codes),
))
@classmethod
def from_reader(cls, reader):
reader.read_int()
_lang_codes = []
for _ in range(reader.read_int()):
_x = reader.tgread_string()
_lang_codes.append(_x)
return cls(lang_codes=_lang_codes)
class GetEmojiProfilePhotoGroupsRequest(TLRequest):
CONSTRUCTOR_ID = 0x21a548f3
SUBCLASS_OF_ID = 0x7eca55d9
# noinspection PyShadowingBuiltins
def __init__(self, hash: int):
"""
:returns messages.EmojiGroups: Instance of either EmojiGroupsNotModified, EmojiGroups.
"""
self.hash = hash
def to_dict(self):
return {
'_': 'GetEmojiProfilePhotoGroupsRequest',
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\xf3H\xa5!',
struct.pack('<i', self.hash),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.read_int()
return cls(hash=_hash)
class GetEmojiStatusGroupsRequest(TLRequest):
CONSTRUCTOR_ID = 0x2ecd56cd
SUBCLASS_OF_ID = 0x7eca55d9
# noinspection PyShadowingBuiltins
def __init__(self, hash: int):
"""
:returns messages.EmojiGroups: Instance of either EmojiGroupsNotModified, EmojiGroups.
"""
self.hash = hash
def to_dict(self):
return {
'_': 'GetEmojiStatusGroupsRequest',
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\xcdV\xcd.',
struct.pack('<i', self.hash),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.read_int()
return cls(hash=_hash)
class GetEmojiStickersRequest(TLRequest):
CONSTRUCTOR_ID = 0xfbfca18f
SUBCLASS_OF_ID = 0x45834829
# noinspection PyShadowingBuiltins
def __init__(self, hash: int):
"""
:returns messages.AllStickers: Instance of either AllStickersNotModified, AllStickers.
"""
self.hash = hash
def to_dict(self):
return {
'_': 'GetEmojiStickersRequest',
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\x8f\xa1\xfc\xfb',
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.read_long()
return cls(hash=_hash)
class GetEmojiURLRequest(TLRequest):
CONSTRUCTOR_ID = 0xd5b10c26
SUBCLASS_OF_ID = 0x1fa08a19
def __init__(self, lang_code: str):
"""
:returns EmojiURL: Instance of EmojiURL.
"""
self.lang_code = lang_code
def to_dict(self):
return {
'_': 'GetEmojiURLRequest',
'lang_code': self.lang_code
}
def _bytes(self):
return b''.join((
b'&\x0c\xb1\xd5',
self.serialize_bytes(self.lang_code),
))
@classmethod
def from_reader(cls, reader):
_lang_code = reader.tgread_string()
return cls(lang_code=_lang_code)
class GetExportedChatInviteRequest(TLRequest):
CONSTRUCTOR_ID = 0x73746f5c
SUBCLASS_OF_ID = 0x82dcd4ca
def __init__(self, peer: 'TypeInputPeer', link: str):
"""
:returns messages.ExportedChatInvite: Instance of either ExportedChatInvite, ExportedChatInviteReplaced.
"""
self.peer = peer
self.link = link
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetExportedChatInviteRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'link': self.link
}
def _bytes(self):
return b''.join((
b'\\ots',
self.peer._bytes(),
self.serialize_bytes(self.link),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_link = reader.tgread_string()
return cls(peer=_peer, link=_link)
class GetExportedChatInvitesRequest(TLRequest):
CONSTRUCTOR_ID = 0xa2b5a3f6
SUBCLASS_OF_ID = 0x603d3871
def __init__(self, peer: 'TypeInputPeer', admin_id: 'TypeInputUser', limit: int, revoked: Optional[bool]=None, offset_date: Optional[datetime]=None, offset_link: Optional[str]=None):
"""
:returns messages.ExportedChatInvites: Instance of ExportedChatInvites.
"""
self.peer = peer
self.admin_id = admin_id
self.limit = limit
self.revoked = revoked
self.offset_date = offset_date
self.offset_link = offset_link
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
self.admin_id = utils.get_input_user(await client.get_input_entity(self.admin_id))
def to_dict(self):
return {
'_': 'GetExportedChatInvitesRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'admin_id': self.admin_id.to_dict() if isinstance(self.admin_id, TLObject) else self.admin_id,
'limit': self.limit,
'revoked': self.revoked,
'offset_date': self.offset_date,
'offset_link': self.offset_link
}
def _bytes(self):
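# offset_date and offset_link are serialized under the same flag bit
# (value 4, see the struct.pack below), so the assertion that follows
# requires them to be provided together or omitted together.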
assert ((self.offset_date or self.offset_date is not None) and (self.offset_link or self.offset_link is not None)) or ((self.offset_date is None or self.offset_date is False) and (self.offset_link is None or self.offset_link is False)), 'offset_date, offset_link parameters must all be False-y (like None) or all be True-y'
return b''.join((
b'\xf6\xa3\xb5\xa2',
struct.pack('<I', (0 if self.revoked is None or self.revoked is False else 8) | (0 if self.offset_date is None or self.offset_date is False else 4) | (0 if self.offset_link is None or self.offset_link is False else 4)),
self.peer._bytes(),
self.admin_id._bytes(),
b'' if self.offset_date is None or self.offset_date is False else (self.serialize_datetime(self.offset_date)),
b'' if self.offset_link is None or self.offset_link is False else (self.serialize_bytes(self.offset_link)),
struct.pack('<i', self.limit),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_revoked = bool(flags & 8)
_peer = reader.tgread_object()
_admin_id = reader.tgread_object()
if flags & 4:
_offset_date = reader.tgread_date()
else:
_offset_date = None
if flags & 4:
_offset_link = reader.tgread_string()
else:
_offset_link = None
_limit = reader.read_int()
return cls(peer=_peer, admin_id=_admin_id, limit=_limit, revoked=_revoked, offset_date=_offset_date, offset_link=_offset_link)
class GetExtendedMediaRequest(TLRequest):
CONSTRUCTOR_ID = 0x84f80814
SUBCLASS_OF_ID = 0x8af52aac
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', id: List[int]):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.id = id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetExtendedMediaRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'id': [] if self.id is None else self.id[:]
}
def _bytes(self):
return b''.join((
b'\x14\x08\xf8\x84',
self.peer._bytes(),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<i', x) for x in self.id),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
reader.read_int()
_id = []
for _ in range(reader.read_int()):
_x = reader.read_int()
_id.append(_x)
return cls(peer=_peer, id=_id)
class GetFavedStickersRequest(TLRequest):
CONSTRUCTOR_ID = 0x4f1aaa9
SUBCLASS_OF_ID = 0x8e736fb9
# noinspection PyShadowingBuiltins
def __init__(self, hash: int):
"""
:returns messages.FavedStickers: Instance of either FavedStickersNotModified, FavedStickers.
"""
self.hash = hash
def to_dict(self):
return {
'_': 'GetFavedStickersRequest',
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\xa9\xaa\xf1\x04',
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.read_long()
return cls(hash=_hash)
class GetFeaturedEmojiStickersRequest(TLRequest):
CONSTRUCTOR_ID = 0xecf6736
SUBCLASS_OF_ID = 0x2614b722
# noinspection PyShadowingBuiltins
def __init__(self, hash: int):
"""
:returns messages.FeaturedStickers: Instance of either FeaturedStickersNotModified, FeaturedStickers.
"""
self.hash = hash
def to_dict(self):
return {
'_': 'GetFeaturedEmojiStickersRequest',
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'6g\xcf\x0e',
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.read_long()
return cls(hash=_hash)
class GetFeaturedStickersRequest(TLRequest):
CONSTRUCTOR_ID = 0x64780b14
SUBCLASS_OF_ID = 0x2614b722
# noinspection PyShadowingBuiltins
def __init__(self, hash: int):
"""
:returns messages.FeaturedStickers: Instance of either FeaturedStickersNotModified, FeaturedStickers.
"""
self.hash = hash
def to_dict(self):
return {
'_': 'GetFeaturedStickersRequest',
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\x14\x0bxd',
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.read_long()
return cls(hash=_hash)
class GetFullChatRequest(TLRequest):
CONSTRUCTOR_ID = 0xaeb00b34
SUBCLASS_OF_ID = 0x225a5109
def __init__(self, chat_id: int):
"""
:returns messages.ChatFull: Instance of ChatFull.
"""
self.chat_id = chat_id
def to_dict(self):
return {
'_': 'GetFullChatRequest',
'chat_id': self.chat_id
}
def _bytes(self):
return b''.join((
b'4\x0b\xb0\xae',
struct.pack('<q', self.chat_id),
))
@classmethod
def from_reader(cls, reader):
_chat_id = reader.read_long()
return cls(chat_id=_chat_id)
class GetGameHighScoresRequest(TLRequest):
CONSTRUCTOR_ID = 0xe822649d
SUBCLASS_OF_ID = 0x6ccd95fd
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', id: int, user_id: 'TypeInputUser'):
"""
:returns messages.HighScores: Instance of HighScores.
"""
self.peer = peer
self.id = id
self.user_id = user_id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
self.user_id = utils.get_input_user(await client.get_input_entity(self.user_id))
def to_dict(self):
return {
'_': 'GetGameHighScoresRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'id': self.id,
'user_id': self.user_id.to_dict() if isinstance(self.user_id, TLObject) else self.user_id
}
def _bytes(self):
return b''.join((
b'\x9dd"\xe8',
self.peer._bytes(),
struct.pack('<i', self.id),
self.user_id._bytes(),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_id = reader.read_int()
_user_id = reader.tgread_object()
return cls(peer=_peer, id=_id, user_id=_user_id)
class GetHistoryRequest(TLRequest):
CONSTRUCTOR_ID = 0x4423e6c5
SUBCLASS_OF_ID = 0xd4b40b5e
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', offset_id: int, offset_date: Optional[datetime], add_offset: int, limit: int, max_id: int, min_id: int, hash: int):
"""
:returns messages.Messages: Instance of either Messages, MessagesSlice, ChannelMessages, MessagesNotModified.
"""
self.peer = peer
self.offset_id = offset_id
self.offset_date = offset_date
self.add_offset = add_offset
self.limit = limit
self.max_id = max_id
self.min_id = min_id
self.hash = hash
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetHistoryRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'offset_id': self.offset_id,
'offset_date': self.offset_date,
'add_offset': self.add_offset,
'limit': self.limit,
'max_id': self.max_id,
'min_id': self.min_id,
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\xc5\xe6#D',
self.peer._bytes(),
struct.pack('<i', self.offset_id),
self.serialize_datetime(self.offset_date),
struct.pack('<i', self.add_offset),
struct.pack('<i', self.limit),
struct.pack('<i', self.max_id),
struct.pack('<i', self.min_id),
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_offset_id = reader.read_int()
_offset_date = reader.tgread_date()
_add_offset = reader.read_int()
_limit = reader.read_int()
_max_id = reader.read_int()
_min_id = reader.read_int()
_hash = reader.read_long()
return cls(peer=_peer, offset_id=_offset_id, offset_date=_offset_date, add_offset=_add_offset, limit=_limit, max_id=_max_id, min_id=_min_id, hash=_hash)
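# Usage sketch (assumption: a Telethon-style client; the peer may be any
# entity the client can resolve, since resolve() converts it to an input
# peer before sending). Zeroed offsets and hash request the most recent
# messages:
#
#     history = await client(GetHistoryRequest(
#         peer='me', offset_id=0, offset_date=None, add_offset=0,
#         limit=10, max_id=0, min_id=0, hash=0))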
class GetInlineBotResultsRequest(TLRequest):
CONSTRUCTOR_ID = 0x514e999d
SUBCLASS_OF_ID = 0x3ed4d9c9
def __init__(self, bot: 'TypeInputUser', peer: 'TypeInputPeer', query: str, offset: str, geo_point: Optional['TypeInputGeoPoint']=None):
"""
:returns messages.BotResults: Instance of BotResults.
"""
self.bot = bot
self.peer = peer
self.query = query
self.offset = offset
self.geo_point = geo_point
async def resolve(self, client, utils):
self.bot = utils.get_input_user(await client.get_input_entity(self.bot))
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetInlineBotResultsRequest',
'bot': self.bot.to_dict() if isinstance(self.bot, TLObject) else self.bot,
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'query': self.query,
'offset': self.offset,
'geo_point': self.geo_point.to_dict() if isinstance(self.geo_point, TLObject) else self.geo_point
}
def _bytes(self):
return b''.join((
b'\x9d\x99NQ',
struct.pack('<I', (0 if self.geo_point is None or self.geo_point is False else 1)),
self.bot._bytes(),
self.peer._bytes(),
b'' if self.geo_point is None or self.geo_point is False else (self.geo_point._bytes()),
self.serialize_bytes(self.query),
self.serialize_bytes(self.offset),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_bot = reader.tgread_object()
_peer = reader.tgread_object()
if flags & 1:
_geo_point = reader.tgread_object()
else:
_geo_point = None
_query = reader.tgread_string()
_offset = reader.tgread_string()
return cls(bot=_bot, peer=_peer, query=_query, offset=_offset, geo_point=_geo_point)
class GetInlineGameHighScoresRequest(TLRequest):
CONSTRUCTOR_ID = 0xf635e1b
SUBCLASS_OF_ID = 0x6ccd95fd
# noinspection PyShadowingBuiltins
def __init__(self, id: 'TypeInputBotInlineMessageID', user_id: 'TypeInputUser'):
"""
:returns messages.HighScores: Instance of HighScores.
"""
self.id = id
self.user_id = user_id
async def resolve(self, client, utils):
self.user_id = utils.get_input_user(await client.get_input_entity(self.user_id))
def to_dict(self):
return {
'_': 'GetInlineGameHighScoresRequest',
'id': self.id.to_dict() if isinstance(self.id, TLObject) else self.id,
'user_id': self.user_id.to_dict() if isinstance(self.user_id, TLObject) else self.user_id
}
def _bytes(self):
return b''.join((
b'\x1b^c\x0f',
self.id._bytes(),
self.user_id._bytes(),
))
@classmethod
def from_reader(cls, reader):
_id = reader.tgread_object()
_user_id = reader.tgread_object()
return cls(id=_id, user_id=_user_id)
class GetMaskStickersRequest(TLRequest):
CONSTRUCTOR_ID = 0x640f82b8
SUBCLASS_OF_ID = 0x45834829
# noinspection PyShadowingBuiltins
def __init__(self, hash: int):
"""
:returns messages.AllStickers: Instance of either AllStickersNotModified, AllStickers.
"""
self.hash = hash
def to_dict(self):
return {
'_': 'GetMaskStickersRequest',
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\xb8\x82\x0fd',
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.read_long()
return cls(hash=_hash)
class GetMessageEditDataRequest(TLRequest):
CONSTRUCTOR_ID = 0xfda68d36
SUBCLASS_OF_ID = 0xfb47949d
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', id: int):
"""
:returns messages.MessageEditData: Instance of MessageEditData.
"""
self.peer = peer
self.id = id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetMessageEditDataRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'id': self.id
}
def _bytes(self):
return b''.join((
b'6\x8d\xa6\xfd',
self.peer._bytes(),
struct.pack('<i', self.id),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_id = reader.read_int()
return cls(peer=_peer, id=_id)
class GetMessageReactionsListRequest(TLRequest):
CONSTRUCTOR_ID = 0x461b3f48
SUBCLASS_OF_ID = 0x60fce5e6
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', id: int, limit: int, reaction: Optional['TypeReaction']=None, offset: Optional[str]=None):
"""
:returns messages.MessageReactionsList: Instance of MessageReactionsList.
"""
self.peer = peer
self.id = id
self.limit = limit
self.reaction = reaction
self.offset = offset
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetMessageReactionsListRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'id': self.id,
'limit': self.limit,
'reaction': self.reaction.to_dict() if isinstance(self.reaction, TLObject) else self.reaction,
'offset': self.offset
}
def _bytes(self):
return b''.join((
b'H?\x1bF',
struct.pack('<I', (0 if self.reaction is None or self.reaction is False else 1) | (0 if self.offset is None or self.offset is False else 2)),
self.peer._bytes(),
struct.pack('<i', self.id),
b'' if self.reaction is None or self.reaction is False else (self.reaction._bytes()),
b'' if self.offset is None or self.offset is False else (self.serialize_bytes(self.offset)),
struct.pack('<i', self.limit),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_peer = reader.tgread_object()
_id = reader.read_int()
if flags & 1:
_reaction = reader.tgread_object()
else:
_reaction = None
if flags & 2:
_offset = reader.tgread_string()
else:
_offset = None
_limit = reader.read_int()
return cls(peer=_peer, id=_id, limit=_limit, reaction=_reaction, offset=_offset)
class GetMessageReadParticipantsRequest(TLRequest):
CONSTRUCTOR_ID = 0x31c1c44f
SUBCLASS_OF_ID = 0x21ca455b
def __init__(self, peer: 'TypeInputPeer', msg_id: int):
"""
:returns Vector<ReadParticipantDate>: This type has no constructors.
"""
self.peer = peer
self.msg_id = msg_id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetMessageReadParticipantsRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'msg_id': self.msg_id
}
def _bytes(self):
return b''.join((
b'O\xc4\xc11',
self.peer._bytes(),
struct.pack('<i', self.msg_id),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_msg_id = reader.read_int()
return cls(peer=_peer, msg_id=_msg_id)
class GetMessagesRequest(TLRequest):
CONSTRUCTOR_ID = 0x63c66506
SUBCLASS_OF_ID = 0xd4b40b5e
# noinspection PyShadowingBuiltins
def __init__(self, id: List['TypeInputMessage']):
"""
:returns messages.Messages: Instance of either Messages, MessagesSlice, ChannelMessages, MessagesNotModified.
"""
self.id = id
async def resolve(self, client, utils):
_tmp = []
for _x in self.id:
_tmp.append(utils.get_input_message(_x))
self.id = _tmp
def to_dict(self):
return {
'_': 'GetMessagesRequest',
'id': [] if self.id is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.id]
}
def _bytes(self):
return b''.join((
b'\x06e\xc6c',
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(x._bytes() for x in self.id),
))
@classmethod
def from_reader(cls, reader):
reader.read_int()
_id = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_id.append(_x)
return cls(id=_id)
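# Usage sketch (assumption: a Telethon-style client). resolve() passes each
# element through utils.get_input_message, which in Telethon-style libraries
# typically wraps plain integers into InputMessageID, so raw message ids are
# accepted:
#
#     msgs = await client(GetMessagesRequest(id=[4732, 4733]))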
class GetMessagesReactionsRequest(TLRequest):
CONSTRUCTOR_ID = 0x8bba90e6
SUBCLASS_OF_ID = 0x8af52aac
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', id: List[int]):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.id = id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetMessagesReactionsRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'id': [] if self.id is None else self.id[:]
}
def _bytes(self):
return b''.join((
b'\xe6\x90\xba\x8b',
self.peer._bytes(),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<i', x) for x in self.id),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
reader.read_int()
_id = []
for _ in range(reader.read_int()):
_x = reader.read_int()
_id.append(_x)
return cls(peer=_peer, id=_id)
class GetMessagesViewsRequest(TLRequest):
CONSTRUCTOR_ID = 0x5784d3e1
SUBCLASS_OF_ID = 0xafb5eb9c
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', id: List[int], increment: bool):
"""
:returns messages.MessageViews: Instance of MessageViews.
"""
self.peer = peer
self.id = id
self.increment = increment
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetMessagesViewsRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'id': [] if self.id is None else self.id[:],
'increment': self.increment
}
def _bytes(self):
return b''.join((
b'\xe1\xd3\x84W',
self.peer._bytes(),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<i', x) for x in self.id),
b'\xb5ur\x99' if self.increment else b'7\x97y\xbc',
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
reader.read_int()
_id = []
for _ in range(reader.read_int()):
_x = reader.read_int()
_id.append(_x)
_increment = reader.tgread_bool()
return cls(peer=_peer, id=_id, increment=_increment)
class GetOldFeaturedStickersRequest(TLRequest):
CONSTRUCTOR_ID = 0x7ed094a1
SUBCLASS_OF_ID = 0x2614b722
# noinspection PyShadowingBuiltins
def __init__(self, offset: int, limit: int, hash: int):
"""
:returns messages.FeaturedStickers: Instance of either FeaturedStickersNotModified, FeaturedStickers.
"""
self.offset = offset
self.limit = limit
self.hash = hash
def to_dict(self):
return {
'_': 'GetOldFeaturedStickersRequest',
'offset': self.offset,
'limit': self.limit,
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\xa1\x94\xd0~',
struct.pack('<i', self.offset),
struct.pack('<i', self.limit),
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
_offset = reader.read_int()
_limit = reader.read_int()
_hash = reader.read_long()
return cls(offset=_offset, limit=_limit, hash=_hash)
class GetOnlinesRequest(TLRequest):
CONSTRUCTOR_ID = 0x6e2be050
SUBCLASS_OF_ID = 0x8c81903a
def __init__(self, peer: 'TypeInputPeer'):
"""
:returns ChatOnlines: Instance of ChatOnlines.
"""
self.peer = peer
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetOnlinesRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer
}
def _bytes(self):
return b''.join((
b'P\xe0+n',
self.peer._bytes(),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
return cls(peer=_peer)
class GetPeerDialogsRequest(TLRequest):
CONSTRUCTOR_ID = 0xe470bcfd
SUBCLASS_OF_ID = 0x3ac70132
def __init__(self, peers: List['TypeInputDialogPeer']):
"""
:returns messages.PeerDialogs: Instance of PeerDialogs.
"""
self.peers = peers
async def resolve(self, client, utils):
_tmp = []
for _x in self.peers:
_tmp.append(await client._get_input_dialog(_x))
self.peers = _tmp
def to_dict(self):
return {
'_': 'GetPeerDialogsRequest',
'peers': [] if self.peers is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.peers]
}
def _bytes(self):
return b''.join((
b'\xfd\xbcp\xe4',
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.peers)),b''.join(x._bytes() for x in self.peers),
))
@classmethod
def from_reader(cls, reader):
reader.read_int()
_peers = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_peers.append(_x)
return cls(peers=_peers)
class GetPeerSettingsRequest(TLRequest):
CONSTRUCTOR_ID = 0xefd9a6a2
SUBCLASS_OF_ID = 0x65a2f7a1
def __init__(self, peer: 'TypeInputPeer'):
"""
:returns messages.PeerSettings: Instance of PeerSettings.
"""
self.peer = peer
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetPeerSettingsRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer
}
def _bytes(self):
return b''.join((
b'\xa2\xa6\xd9\xef',
self.peer._bytes(),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
return cls(peer=_peer)
class GetPinnedDialogsRequest(TLRequest):
CONSTRUCTOR_ID = 0xd6b94df2
SUBCLASS_OF_ID = 0x3ac70132
def __init__(self, folder_id: int):
"""
:returns messages.PeerDialogs: Instance of PeerDialogs.
"""
self.folder_id = folder_id
def to_dict(self):
return {
'_': 'GetPinnedDialogsRequest',
'folder_id': self.folder_id
}
def _bytes(self):
return b''.join((
b'\xf2M\xb9\xd6',
struct.pack('<i', self.folder_id),
))
@classmethod
def from_reader(cls, reader):
_folder_id = reader.read_int()
return cls(folder_id=_folder_id)
class GetPollResultsRequest(TLRequest):
CONSTRUCTOR_ID = 0x73bb643b
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, peer: 'TypeInputPeer', msg_id: int):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.msg_id = msg_id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetPollResultsRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'msg_id': self.msg_id
}
def _bytes(self):
return b''.join((
b';d\xbbs',
self.peer._bytes(),
struct.pack('<i', self.msg_id),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_msg_id = reader.read_int()
return cls(peer=_peer, msg_id=_msg_id)
class GetPollVotesRequest(TLRequest):
CONSTRUCTOR_ID = 0xb86e380e
SUBCLASS_OF_ID = 0xc2199885
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', id: int, limit: int, option: Optional[bytes]=None, offset: Optional[str]=None):
"""
:returns messages.VotesList: Instance of VotesList.
"""
self.peer = peer
self.id = id
self.limit = limit
self.option = option
self.offset = offset
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetPollVotesRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'id': self.id,
'limit': self.limit,
'option': self.option,
'offset': self.offset
}
def _bytes(self):
return b''.join((
b'\x0e8n\xb8',
struct.pack('<I', (0 if self.option is None or self.option is False else 1) | (0 if self.offset is None or self.offset is False else 2)),
self.peer._bytes(),
struct.pack('<i', self.id),
b'' if self.option is None or self.option is False else (self.serialize_bytes(self.option)),
b'' if self.offset is None or self.offset is False else (self.serialize_bytes(self.offset)),
struct.pack('<i', self.limit),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_peer = reader.tgread_object()
_id = reader.read_int()
if flags & 1:
_option = reader.tgread_bytes()
else:
_option = None
if flags & 2:
_offset = reader.tgread_string()
else:
_offset = None
_limit = reader.read_int()
return cls(peer=_peer, id=_id, limit=_limit, option=_option, offset=_offset)
class GetRecentLocationsRequest(TLRequest):
CONSTRUCTOR_ID = 0x702a40e0
SUBCLASS_OF_ID = 0xd4b40b5e
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', limit: int, hash: int):
"""
:returns messages.Messages: Instance of either Messages, MessagesSlice, ChannelMessages, MessagesNotModified.
"""
self.peer = peer
self.limit = limit
self.hash = hash
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetRecentLocationsRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'limit': self.limit,
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\xe0@*p',
self.peer._bytes(),
struct.pack('<i', self.limit),
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_limit = reader.read_int()
_hash = reader.read_long()
return cls(peer=_peer, limit=_limit, hash=_hash)
class GetRecentReactionsRequest(TLRequest):
CONSTRUCTOR_ID = 0x39461db2
SUBCLASS_OF_ID = 0xadc38324
# noinspection PyShadowingBuiltins
def __init__(self, limit: int, hash: int):
"""
:returns messages.Reactions: Instance of either ReactionsNotModified, Reactions.
"""
self.limit = limit
self.hash = hash
def to_dict(self):
return {
'_': 'GetRecentReactionsRequest',
'limit': self.limit,
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\xb2\x1dF9',
struct.pack('<i', self.limit),
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
_limit = reader.read_int()
_hash = reader.read_long()
return cls(limit=_limit, hash=_hash)
class GetRecentStickersRequest(TLRequest):
CONSTRUCTOR_ID = 0x9da9403b
SUBCLASS_OF_ID = 0xf76f8683
# noinspection PyShadowingBuiltins
def __init__(self, hash: int, attached: Optional[bool]=None):
"""
:returns messages.RecentStickers: Instance of either RecentStickersNotModified, RecentStickers.
"""
self.hash = hash
self.attached = attached
def to_dict(self):
return {
'_': 'GetRecentStickersRequest',
'hash': self.hash,
'attached': self.attached
}
def _bytes(self):
return b''.join((
b';@\xa9\x9d',
struct.pack('<I', (0 if self.attached is None or self.attached is False else 1)),
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_attached = bool(flags & 1)
_hash = reader.read_long()
return cls(hash=_hash, attached=_attached)
class GetRepliesRequest(TLRequest):
CONSTRUCTOR_ID = 0x22ddd30c
SUBCLASS_OF_ID = 0xd4b40b5e
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', msg_id: int, offset_id: int, offset_date: Optional[datetime], add_offset: int, limit: int, max_id: int, min_id: int, hash: int):
"""
:returns messages.Messages: Instance of either Messages, MessagesSlice, ChannelMessages, MessagesNotModified.
"""
self.peer = peer
self.msg_id = msg_id
self.offset_id = offset_id
self.offset_date = offset_date
self.add_offset = add_offset
self.limit = limit
self.max_id = max_id
self.min_id = min_id
self.hash = hash
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetRepliesRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'msg_id': self.msg_id,
'offset_id': self.offset_id,
'offset_date': self.offset_date,
'add_offset': self.add_offset,
'limit': self.limit,
'max_id': self.max_id,
'min_id': self.min_id,
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\x0c\xd3\xdd"',
self.peer._bytes(),
struct.pack('<i', self.msg_id),
struct.pack('<i', self.offset_id),
self.serialize_datetime(self.offset_date),
struct.pack('<i', self.add_offset),
struct.pack('<i', self.limit),
struct.pack('<i', self.max_id),
struct.pack('<i', self.min_id),
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_msg_id = reader.read_int()
_offset_id = reader.read_int()
_offset_date = reader.tgread_date()
_add_offset = reader.read_int()
_limit = reader.read_int()
_max_id = reader.read_int()
_min_id = reader.read_int()
_hash = reader.read_long()
return cls(peer=_peer, msg_id=_msg_id, offset_id=_offset_id, offset_date=_offset_date, add_offset=_add_offset, limit=_limit, max_id=_max_id, min_id=_min_id, hash=_hash)
class GetSavedGifsRequest(TLRequest):
CONSTRUCTOR_ID = 0x5cf09635
SUBCLASS_OF_ID = 0xa68b61f5
# noinspection PyShadowingBuiltins
def __init__(self, hash: int):
"""
:returns messages.SavedGifs: Instance of either SavedGifsNotModified, SavedGifs.
"""
self.hash = hash
def to_dict(self):
return {
'_': 'GetSavedGifsRequest',
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'5\x96\xf0\\',
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.read_long()
return cls(hash=_hash)
class GetScheduledHistoryRequest(TLRequest):
CONSTRUCTOR_ID = 0xf516760b
SUBCLASS_OF_ID = 0xd4b40b5e
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', hash: int):
"""
:returns messages.Messages: Instance of either Messages, MessagesSlice, ChannelMessages, MessagesNotModified.
"""
self.peer = peer
self.hash = hash
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetScheduledHistoryRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\x0bv\x16\xf5',
self.peer._bytes(),
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_hash = reader.read_long()
return cls(peer=_peer, hash=_hash)
class GetScheduledMessagesRequest(TLRequest):
CONSTRUCTOR_ID = 0xbdbb0464
SUBCLASS_OF_ID = 0xd4b40b5e
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', id: List[int]):
"""
:returns messages.Messages: Instance of either Messages, MessagesSlice, ChannelMessages, MessagesNotModified.
"""
self.peer = peer
self.id = id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetScheduledMessagesRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'id': [] if self.id is None else self.id[:]
}
def _bytes(self):
return b''.join((
b'd\x04\xbb\xbd',
self.peer._bytes(),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<i', x) for x in self.id),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
reader.read_int()
_id = []
for _ in range(reader.read_int()):
_x = reader.read_int()
_id.append(_x)
return cls(peer=_peer, id=_id)
class GetSearchCountersRequest(TLRequest):
CONSTRUCTOR_ID = 0xae7cc1
SUBCLASS_OF_ID = 0x6bde3c6e
def __init__(self, peer: 'TypeInputPeer', filters: List['TypeMessagesFilter'], top_msg_id: Optional[int]=None):
"""
:returns Vector<messages.SearchCounter>: This type has no constructors.
"""
self.peer = peer
self.filters = filters
self.top_msg_id = top_msg_id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetSearchCountersRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'filters': [] if self.filters is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.filters],
'top_msg_id': self.top_msg_id
}
def _bytes(self):
return b''.join((
b'\xc1|\xae\x00',
struct.pack('<I', (0 if self.top_msg_id is None or self.top_msg_id is False else 1)),
self.peer._bytes(),
b'' if self.top_msg_id is None or self.top_msg_id is False else (struct.pack('<i', self.top_msg_id)),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.filters)),b''.join(x._bytes() for x in self.filters),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_peer = reader.tgread_object()
if flags & 1:
_top_msg_id = reader.read_int()
else:
_top_msg_id = None
reader.read_int()
_filters = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_filters.append(_x)
return cls(peer=_peer, filters=_filters, top_msg_id=_top_msg_id)
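# Usage sketch (assumptions: a Telethon-style client and that filter classes
# such as InputMessagesFilterPhotos come from the library's types module;
# both names are illustrative). Each filter yields one counter in the result:
#
#     counters = await client(GetSearchCountersRequest(
#         peer='me', filters=[InputMessagesFilterPhotos()]))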
class GetSearchResultsCalendarRequest(TLRequest):
CONSTRUCTOR_ID = 0x49f0bde9
SUBCLASS_OF_ID = 0x92c5640f
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', filter: 'TypeMessagesFilter', offset_id: int, offset_date: Optional[datetime]):
"""
:returns messages.SearchResultsCalendar: Instance of SearchResultsCalendar.
"""
self.peer = peer
self.filter = filter
self.offset_id = offset_id
self.offset_date = offset_date
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetSearchResultsCalendarRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'filter': self.filter.to_dict() if isinstance(self.filter, TLObject) else self.filter,
'offset_id': self.offset_id,
'offset_date': self.offset_date
}
def _bytes(self):
return b''.join((
b'\xe9\xbd\xf0I',
self.peer._bytes(),
self.filter._bytes(),
struct.pack('<i', self.offset_id),
self.serialize_datetime(self.offset_date),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_filter = reader.tgread_object()
_offset_id = reader.read_int()
_offset_date = reader.tgread_date()
return cls(peer=_peer, filter=_filter, offset_id=_offset_id, offset_date=_offset_date)
class GetSearchResultsPositionsRequest(TLRequest):
CONSTRUCTOR_ID = 0x6e9583a3
SUBCLASS_OF_ID = 0xd963708d
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', filter: 'TypeMessagesFilter', offset_id: int, limit: int):
"""
:returns messages.SearchResultsPositions: Instance of SearchResultsPositions.
"""
self.peer = peer
self.filter = filter
self.offset_id = offset_id
self.limit = limit
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetSearchResultsPositionsRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'filter': self.filter.to_dict() if isinstance(self.filter, TLObject) else self.filter,
'offset_id': self.offset_id,
'limit': self.limit
}
def _bytes(self):
return b''.join((
b'\xa3\x83\x95n',
self.peer._bytes(),
self.filter._bytes(),
struct.pack('<i', self.offset_id),
struct.pack('<i', self.limit),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_filter = reader.tgread_object()
_offset_id = reader.read_int()
_limit = reader.read_int()
return cls(peer=_peer, filter=_filter, offset_id=_offset_id, limit=_limit)
class GetSplitRangesRequest(TLRequest):
CONSTRUCTOR_ID = 0x1cff7e08
SUBCLASS_OF_ID = 0x5ba52504
def to_dict(self):
return {
'_': 'GetSplitRangesRequest'
}
def _bytes(self):
return b''.join((
b'\x08~\xff\x1c',
))
@classmethod
def from_reader(cls, reader):
return cls()
class GetStickerSetRequest(TLRequest):
CONSTRUCTOR_ID = 0xc8a0ec74
SUBCLASS_OF_ID = 0x9b704a5a
# noinspection PyShadowingBuiltins
def __init__(self, stickerset: 'TypeInputStickerSet', hash: int):
"""
:returns messages.StickerSet: Instance of either StickerSet, StickerSetNotModified.
"""
self.stickerset = stickerset
self.hash = hash
def to_dict(self):
return {
'_': 'GetStickerSetRequest',
'stickerset': self.stickerset.to_dict() if isinstance(self.stickerset, TLObject) else self.stickerset,
'hash': self.hash
}
def _bytes(self):
return b''.join((
b't\xec\xa0\xc8',
self.stickerset._bytes(),
struct.pack('<i', self.hash),
))
@classmethod
def from_reader(cls, reader):
_stickerset = reader.tgread_object()
_hash = reader.read_int()
return cls(stickerset=_stickerset, hash=_hash)
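# Usage sketch (assumptions: a Telethon-style client and that
# InputStickerSetShortName is available from the library's types module;
# the short name 'Animals' is only an example). hash=0 requests the full
# set rather than a StickerSetNotModified answer:
#
#     sticker_set = await client(GetStickerSetRequest(
#         stickerset=InputStickerSetShortName(short_name='Animals'),
#         hash=0))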
class GetStickersRequest(TLRequest):
CONSTRUCTOR_ID = 0xd5a5d3a1
SUBCLASS_OF_ID = 0xd73bb9de
# noinspection PyShadowingBuiltins
def __init__(self, emoticon: str, hash: int):
"""
:returns messages.Stickers: Instance of either StickersNotModified, Stickers.
"""
self.emoticon = emoticon
self.hash = hash
def to_dict(self):
return {
'_': 'GetStickersRequest',
'emoticon': self.emoticon,
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\xa1\xd3\xa5\xd5',
self.serialize_bytes(self.emoticon),
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
_emoticon = reader.tgread_string()
_hash = reader.read_long()
return cls(emoticon=_emoticon, hash=_hash)
class GetSuggestedDialogFiltersRequest(TLRequest):
CONSTRUCTOR_ID = 0xa29cd42c
SUBCLASS_OF_ID = 0x7b296c39
def to_dict(self):
return {
'_': 'GetSuggestedDialogFiltersRequest'
}
def _bytes(self):
return b''.join((
b',\xd4\x9c\xa2',
))
@classmethod
def from_reader(cls, reader):
return cls()
class GetTopReactionsRequest(TLRequest):
CONSTRUCTOR_ID = 0xbb8125ba
SUBCLASS_OF_ID = 0xadc38324
# noinspection PyShadowingBuiltins
def __init__(self, limit: int, hash: int):
"""
:returns messages.Reactions: Instance of either ReactionsNotModified, Reactions.
"""
self.limit = limit
self.hash = hash
def to_dict(self):
return {
'_': 'GetTopReactionsRequest',
'limit': self.limit,
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\xba%\x81\xbb',
struct.pack('<i', self.limit),
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
_limit = reader.read_int()
_hash = reader.read_long()
return cls(limit=_limit, hash=_hash)
class GetUnreadMentionsRequest(TLRequest):
CONSTRUCTOR_ID = 0xf107e790
SUBCLASS_OF_ID = 0xd4b40b5e
def __init__(self, peer: 'TypeInputPeer', offset_id: int, add_offset: int, limit: int, max_id: int, min_id: int, top_msg_id: Optional[int]=None):
"""
:returns messages.Messages: Instance of either Messages, MessagesSlice, ChannelMessages, MessagesNotModified.
"""
self.peer = peer
self.offset_id = offset_id
self.add_offset = add_offset
self.limit = limit
self.max_id = max_id
self.min_id = min_id
self.top_msg_id = top_msg_id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetUnreadMentionsRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'offset_id': self.offset_id,
'add_offset': self.add_offset,
'limit': self.limit,
'max_id': self.max_id,
'min_id': self.min_id,
'top_msg_id': self.top_msg_id
}
def _bytes(self):
return b''.join((
b'\x90\xe7\x07\xf1',
struct.pack('<I', (0 if self.top_msg_id is None or self.top_msg_id is False else 1)),
self.peer._bytes(),
b'' if self.top_msg_id is None or self.top_msg_id is False else (struct.pack('<i', self.top_msg_id)),
struct.pack('<i', self.offset_id),
struct.pack('<i', self.add_offset),
struct.pack('<i', self.limit),
struct.pack('<i', self.max_id),
struct.pack('<i', self.min_id),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_peer = reader.tgread_object()
if flags & 1:
_top_msg_id = reader.read_int()
else:
_top_msg_id = None
_offset_id = reader.read_int()
_add_offset = reader.read_int()
_limit = reader.read_int()
_max_id = reader.read_int()
_min_id = reader.read_int()
return cls(peer=_peer, offset_id=_offset_id, add_offset=_add_offset, limit=_limit, max_id=_max_id, min_id=_min_id, top_msg_id=_top_msg_id)
class GetUnreadReactionsRequest(TLRequest):
CONSTRUCTOR_ID = 0x3223495b
SUBCLASS_OF_ID = 0xd4b40b5e
def __init__(self, peer: 'TypeInputPeer', offset_id: int, add_offset: int, limit: int, max_id: int, min_id: int, top_msg_id: Optional[int]=None):
"""
:returns messages.Messages: Instance of either Messages, MessagesSlice, ChannelMessages, MessagesNotModified.
"""
self.peer = peer
self.offset_id = offset_id
self.add_offset = add_offset
self.limit = limit
self.max_id = max_id
self.min_id = min_id
self.top_msg_id = top_msg_id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'GetUnreadReactionsRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'offset_id': self.offset_id,
'add_offset': self.add_offset,
'limit': self.limit,
'max_id': self.max_id,
'min_id': self.min_id,
'top_msg_id': self.top_msg_id
}
def _bytes(self):
return b''.join((
b'[I#2',
struct.pack('<I', (0 if self.top_msg_id is None or self.top_msg_id is False else 1)),
self.peer._bytes(),
b'' if self.top_msg_id is None or self.top_msg_id is False else (struct.pack('<i', self.top_msg_id)),
struct.pack('<i', self.offset_id),
struct.pack('<i', self.add_offset),
struct.pack('<i', self.limit),
struct.pack('<i', self.max_id),
struct.pack('<i', self.min_id),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_peer = reader.tgread_object()
if flags & 1:
_top_msg_id = reader.read_int()
else:
_top_msg_id = None
_offset_id = reader.read_int()
_add_offset = reader.read_int()
_limit = reader.read_int()
_max_id = reader.read_int()
_min_id = reader.read_int()
return cls(peer=_peer, offset_id=_offset_id, add_offset=_add_offset, limit=_limit, max_id=_max_id, min_id=_min_id, top_msg_id=_top_msg_id)
class GetWebPageRequest(TLRequest):
CONSTRUCTOR_ID = 0x32ca8f91
SUBCLASS_OF_ID = 0x55a97481
# noinspection PyShadowingBuiltins
def __init__(self, url: str, hash: int):
"""
:returns WebPage: Instance of either WebPageEmpty, WebPagePending, WebPage, WebPageNotModified.
"""
self.url = url
self.hash = hash
def to_dict(self):
return {
'_': 'GetWebPageRequest',
'url': self.url,
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\x91\x8f\xca2',
self.serialize_bytes(self.url),
struct.pack('<i', self.hash),
))
@classmethod
def from_reader(cls, reader):
_url = reader.tgread_string()
_hash = reader.read_int()
return cls(url=_url, hash=_hash)
class GetWebPagePreviewRequest(TLRequest):
CONSTRUCTOR_ID = 0x8b68b0cc
SUBCLASS_OF_ID = 0x476cbe32
def __init__(self, message: str, entities: Optional[List['TypeMessageEntity']]=None):
"""
:returns MessageMedia: Instance of either MessageMediaEmpty, MessageMediaPhoto, MessageMediaGeo, MessageMediaContact, MessageMediaUnsupported, MessageMediaDocument, MessageMediaWebPage, MessageMediaVenue, MessageMediaGame, MessageMediaInvoice, MessageMediaGeoLive, MessageMediaPoll, MessageMediaDice.
"""
self.message = message
self.entities = entities
def to_dict(self):
return {
'_': 'GetWebPagePreviewRequest',
'message': self.message,
'entities': [] if self.entities is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.entities]
}
def _bytes(self):
return b''.join((
b'\xcc\xb0h\x8b',
struct.pack('<I', (0 if self.entities is None or self.entities is False else 8)),
self.serialize_bytes(self.message),
b'' if self.entities is None or self.entities is False else b''.join((b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.entities)),b''.join(x._bytes() for x in self.entities))),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_message = reader.tgread_string()
if flags & 8:
reader.read_int()
_entities = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_entities.append(_x)
else:
_entities = None
return cls(message=_message, entities=_entities)
class HideAllChatJoinRequestsRequest(TLRequest):
CONSTRUCTOR_ID = 0xe085f4ea
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, peer: 'TypeInputPeer', approved: Optional[bool]=None, link: Optional[str]=None):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.approved = approved
self.link = link
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'HideAllChatJoinRequestsRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'approved': self.approved,
'link': self.link
}
def _bytes(self):
return b''.join((
b'\xea\xf4\x85\xe0',
struct.pack('<I', (0 if self.approved is None or self.approved is False else 1) | (0 if self.link is None or self.link is False else 2)),
self.peer._bytes(),
b'' if self.link is None or self.link is False else (self.serialize_bytes(self.link)),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_approved = bool(flags & 1)
_peer = reader.tgread_object()
if flags & 2:
_link = reader.tgread_string()
else:
_link = None
return cls(peer=_peer, approved=_approved, link=_link)
class HideChatJoinRequestRequest(TLRequest):
CONSTRUCTOR_ID = 0x7fe7e815
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, peer: 'TypeInputPeer', user_id: 'TypeInputUser', approved: Optional[bool]=None):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.user_id = user_id
self.approved = approved
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
self.user_id = utils.get_input_user(await client.get_input_entity(self.user_id))
def to_dict(self):
return {
'_': 'HideChatJoinRequestRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'user_id': self.user_id.to_dict() if isinstance(self.user_id, TLObject) else self.user_id,
'approved': self.approved
}
def _bytes(self):
return b''.join((
b'\x15\xe8\xe7\x7f',
struct.pack('<I', (0 if self.approved is None or self.approved is False else 1)),
self.peer._bytes(),
self.user_id._bytes(),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_approved = bool(flags & 1)
_peer = reader.tgread_object()
_user_id = reader.tgread_object()
return cls(peer=_peer, user_id=_user_id, approved=_approved)
class HidePeerSettingsBarRequest(TLRequest):
CONSTRUCTOR_ID = 0x4facb138
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, peer: 'TypeInputPeer'):
"""
:returns Bool: This type has no constructors.
"""
self.peer = peer
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'HidePeerSettingsBarRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer
}
def _bytes(self):
return b''.join((
b'8\xb1\xacO',
self.peer._bytes(),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
return cls(peer=_peer)
class ImportChatInviteRequest(TLRequest):
CONSTRUCTOR_ID = 0x6c50051c
SUBCLASS_OF_ID = 0x8af52aac
# noinspection PyShadowingBuiltins
def __init__(self, hash: str):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.hash = hash
def to_dict(self):
return {
'_': 'ImportChatInviteRequest',
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\x1c\x05Pl',
self.serialize_bytes(self.hash),
))
@classmethod
def from_reader(cls, reader):
_hash = reader.tgread_string()
return cls(hash=_hash)
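# Illustrative usage sketch (not part of the generated schema): joining a chat
# via its invite link. `invite_hash` is assumed to be the part of the link
# after "t.me/+" or "t.me/joinchat/"; `client` is an assumed connected client
# that accepts TLRequest instances.
async def _example_import_chat_invite(client, invite_hash):
    # The server answers with an Updates object describing the joined chat.
    return await client(ImportChatInviteRequest(hash=invite_hash))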
class InitHistoryImportRequest(TLRequest):
CONSTRUCTOR_ID = 0x34090c3b
SUBCLASS_OF_ID = 0xb18bb50a
def __init__(self, peer: 'TypeInputPeer', file: 'TypeInputFile', media_count: int):
"""
:returns messages.HistoryImport: Instance of HistoryImport.
"""
self.peer = peer
self.file = file
self.media_count = media_count
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'InitHistoryImportRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'file': self.file.to_dict() if isinstance(self.file, TLObject) else self.file,
'media_count': self.media_count
}
def _bytes(self):
return b''.join((
b';\x0c\t4',
self.peer._bytes(),
self.file._bytes(),
struct.pack('<i', self.media_count),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_file = reader.tgread_object()
_media_count = reader.read_int()
return cls(peer=_peer, file=_file, media_count=_media_count)
class InstallStickerSetRequest(TLRequest):
CONSTRUCTOR_ID = 0xc78fe460
SUBCLASS_OF_ID = 0x67cb3fe8
def __init__(self, stickerset: 'TypeInputStickerSet', archived: bool):
"""
:returns messages.StickerSetInstallResult: Instance of either StickerSetInstallResultSuccess, StickerSetInstallResultArchive.
"""
self.stickerset = stickerset
self.archived = archived
def to_dict(self):
return {
'_': 'InstallStickerSetRequest',
'stickerset': self.stickerset.to_dict() if isinstance(self.stickerset, TLObject) else self.stickerset,
'archived': self.archived
}
def _bytes(self):
return b''.join((
b'`\xe4\x8f\xc7',
self.stickerset._bytes(),
b'\xb5ur\x99' if self.archived else b'7\x97y\xbc',
))
@classmethod
def from_reader(cls, reader):
_stickerset = reader.tgread_object()
_archived = reader.tgread_bool()
return cls(stickerset=_stickerset, archived=_archived)
class MarkDialogUnreadRequest(TLRequest):
CONSTRUCTOR_ID = 0xc286d98f
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, peer: 'TypeInputDialogPeer', unread: Optional[bool]=None):
"""
:returns Bool: This type has no constructors.
"""
self.peer = peer
self.unread = unread
async def resolve(self, client, utils):
self.peer = await client._get_input_dialog(self.peer)
def to_dict(self):
return {
'_': 'MarkDialogUnreadRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'unread': self.unread
}
def _bytes(self):
return b''.join((
b'\x8f\xd9\x86\xc2',
struct.pack('<I', (0 if self.unread is None or self.unread is False else 1)),
self.peer._bytes(),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_unread = bool(flags & 1)
_peer = reader.tgread_object()
return cls(peer=_peer, unread=_unread)
class MigrateChatRequest(TLRequest):
CONSTRUCTOR_ID = 0xa2875319
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, chat_id: int):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.chat_id = chat_id
def to_dict(self):
return {
'_': 'MigrateChatRequest',
'chat_id': self.chat_id
}
def _bytes(self):
return b''.join((
b'\x19S\x87\xa2',
struct.pack('<q', self.chat_id),
))
@classmethod
def from_reader(cls, reader):
_chat_id = reader.read_long()
return cls(chat_id=_chat_id)
class ProlongWebViewRequest(TLRequest):
CONSTRUCTOR_ID = 0x7ff34309
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, peer: 'TypeInputPeer', bot: 'TypeInputUser', query_id: int, silent: Optional[bool]=None, reply_to_msg_id: Optional[int]=None, top_msg_id: Optional[int]=None, send_as: Optional['TypeInputPeer']=None):
"""
:returns Bool: This type has no constructors.
"""
self.peer = peer
self.bot = bot
self.query_id = query_id
self.silent = silent
self.reply_to_msg_id = reply_to_msg_id
self.top_msg_id = top_msg_id
self.send_as = send_as
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
self.bot = utils.get_input_user(await client.get_input_entity(self.bot))
if self.send_as:
self.send_as = utils.get_input_peer(await client.get_input_entity(self.send_as))
def to_dict(self):
return {
'_': 'ProlongWebViewRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'bot': self.bot.to_dict() if isinstance(self.bot, TLObject) else self.bot,
'query_id': self.query_id,
'silent': self.silent,
'reply_to_msg_id': self.reply_to_msg_id,
'top_msg_id': self.top_msg_id,
'send_as': self.send_as.to_dict() if isinstance(self.send_as, TLObject) else self.send_as
}
def _bytes(self):
return b''.join((
b'\tC\xf3\x7f',
struct.pack('<I', (0 if self.silent is None or self.silent is False else 32) | (0 if self.reply_to_msg_id is None or self.reply_to_msg_id is False else 1) | (0 if self.top_msg_id is None or self.top_msg_id is False else 512) | (0 if self.send_as is None or self.send_as is False else 8192)),
self.peer._bytes(),
self.bot._bytes(),
struct.pack('<q', self.query_id),
b'' if self.reply_to_msg_id is None or self.reply_to_msg_id is False else (struct.pack('<i', self.reply_to_msg_id)),
b'' if self.top_msg_id is None or self.top_msg_id is False else (struct.pack('<i', self.top_msg_id)),
b'' if self.send_as is None or self.send_as is False else (self.send_as._bytes()),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_silent = bool(flags & 32)
_peer = reader.tgread_object()
_bot = reader.tgread_object()
_query_id = reader.read_long()
if flags & 1:
_reply_to_msg_id = reader.read_int()
else:
_reply_to_msg_id = None
if flags & 512:
_top_msg_id = reader.read_int()
else:
_top_msg_id = None
if flags & 8192:
_send_as = reader.tgread_object()
else:
_send_as = None
return cls(peer=_peer, bot=_bot, query_id=_query_id, silent=_silent, reply_to_msg_id=_reply_to_msg_id, top_msg_id=_top_msg_id, send_as=_send_as)
class RateTranscribedAudioRequest(TLRequest):
CONSTRUCTOR_ID = 0x7f1d072f
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, peer: 'TypeInputPeer', msg_id: int, transcription_id: int, good: bool):
"""
:returns Bool: This type has no constructors.
"""
self.peer = peer
self.msg_id = msg_id
self.transcription_id = transcription_id
self.good = good
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'RateTranscribedAudioRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'msg_id': self.msg_id,
'transcription_id': self.transcription_id,
'good': self.good
}
def _bytes(self):
return b''.join((
b'/\x07\x1d\x7f',
self.peer._bytes(),
struct.pack('<i', self.msg_id),
struct.pack('<q', self.transcription_id),
b'\xb5ur\x99' if self.good else b'7\x97y\xbc',
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_msg_id = reader.read_int()
_transcription_id = reader.read_long()
_good = reader.tgread_bool()
return cls(peer=_peer, msg_id=_msg_id, transcription_id=_transcription_id, good=_good)
class ReadDiscussionRequest(TLRequest):
CONSTRUCTOR_ID = 0xf731a9f4
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, peer: 'TypeInputPeer', msg_id: int, read_max_id: int):
"""
:returns Bool: This type has no constructors.
"""
self.peer = peer
self.msg_id = msg_id
self.read_max_id = read_max_id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'ReadDiscussionRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'msg_id': self.msg_id,
'read_max_id': self.read_max_id
}
def _bytes(self):
return b''.join((
b'\xf4\xa91\xf7',
self.peer._bytes(),
struct.pack('<i', self.msg_id),
struct.pack('<i', self.read_max_id),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_msg_id = reader.read_int()
_read_max_id = reader.read_int()
return cls(peer=_peer, msg_id=_msg_id, read_max_id=_read_max_id)
class ReadEncryptedHistoryRequest(TLRequest):
CONSTRUCTOR_ID = 0x7f4b690a
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, peer: 'TypeInputEncryptedChat', max_date: Optional[datetime]):
"""
:returns Bool: This type has no constructors.
"""
self.peer = peer
self.max_date = max_date
def to_dict(self):
return {
'_': 'ReadEncryptedHistoryRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'max_date': self.max_date
}
def _bytes(self):
return b''.join((
b'\niK\x7f',
self.peer._bytes(),
self.serialize_datetime(self.max_date),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_max_date = reader.tgread_date()
return cls(peer=_peer, max_date=_max_date)
class ReadFeaturedStickersRequest(TLRequest):
CONSTRUCTOR_ID = 0x5b118126
SUBCLASS_OF_ID = 0xf5b399ac
# noinspection PyShadowingBuiltins
def __init__(self, id: List[int]):
"""
:returns Bool: This type has no constructors.
"""
self.id = id
def to_dict(self):
return {
'_': 'ReadFeaturedStickersRequest',
'id': [] if self.id is None else self.id[:]
}
def _bytes(self):
return b''.join((
b'&\x81\x11[',
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<q', x) for x in self.id),
))
@classmethod
def from_reader(cls, reader):
reader.read_int()
_id = []
for _ in range(reader.read_int()):
_x = reader.read_long()
_id.append(_x)
return cls(id=_id)
class ReadHistoryRequest(TLRequest):
CONSTRUCTOR_ID = 0xe306d3a
SUBCLASS_OF_ID = 0xced3c06e
def __init__(self, peer: 'TypeInputPeer', max_id: int):
"""
:returns messages.AffectedMessages: Instance of AffectedMessages.
"""
self.peer = peer
self.max_id = max_id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'ReadHistoryRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'max_id': self.max_id
}
def _bytes(self):
return b''.join((
b':m0\x0e',
self.peer._bytes(),
struct.pack('<i', self.max_id),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_max_id = reader.read_int()
return cls(peer=_peer, max_id=_max_id)
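# Illustrative usage sketch (not part of the generated schema): marking a
# dialog as read. `peer` may be any entity reference the client can resolve;
# resolve() above converts it to an InputPeer before serialization, assuming
# the client invokes it before sending. max_id=0 is assumed here to mean
# "acknowledge every message".
async def _example_read_history(client, peer, max_id=0):
    return await client(ReadHistoryRequest(peer=peer, max_id=max_id))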
class ReadMentionsRequest(TLRequest):
CONSTRUCTOR_ID = 0x36e5bf4d
SUBCLASS_OF_ID = 0x2c49c116
def __init__(self, peer: 'TypeInputPeer', top_msg_id: Optional[int]=None):
"""
:returns messages.AffectedHistory: Instance of AffectedHistory.
"""
self.peer = peer
self.top_msg_id = top_msg_id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'ReadMentionsRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'top_msg_id': self.top_msg_id
}
def _bytes(self):
return b''.join((
b'M\xbf\xe56',
struct.pack('<I', (0 if self.top_msg_id is None or self.top_msg_id is False else 1)),
self.peer._bytes(),
b'' if self.top_msg_id is None or self.top_msg_id is False else (struct.pack('<i', self.top_msg_id)),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_peer = reader.tgread_object()
if flags & 1:
_top_msg_id = reader.read_int()
else:
_top_msg_id = None
return cls(peer=_peer, top_msg_id=_top_msg_id)
class ReadMessageContentsRequest(TLRequest):
CONSTRUCTOR_ID = 0x36a73f77
SUBCLASS_OF_ID = 0xced3c06e
# noinspection PyShadowingBuiltins
def __init__(self, id: List[int]):
"""
:returns messages.AffectedMessages: Instance of AffectedMessages.
"""
self.id = id
def to_dict(self):
return {
'_': 'ReadMessageContentsRequest',
'id': [] if self.id is None else self.id[:]
}
def _bytes(self):
return b''.join((
b'w?\xa76',
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<i', x) for x in self.id),
))
@classmethod
def from_reader(cls, reader):
reader.read_int()
_id = []
for _ in range(reader.read_int()):
_x = reader.read_int()
_id.append(_x)
return cls(id=_id)
class ReadReactionsRequest(TLRequest):
CONSTRUCTOR_ID = 0x54aa7f8e
SUBCLASS_OF_ID = 0x2c49c116
def __init__(self, peer: 'TypeInputPeer', top_msg_id: Optional[int]=None):
"""
:returns messages.AffectedHistory: Instance of AffectedHistory.
"""
self.peer = peer
self.top_msg_id = top_msg_id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'ReadReactionsRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'top_msg_id': self.top_msg_id
}
def _bytes(self):
return b''.join((
b'\x8e\x7f\xaaT',
struct.pack('<I', (0 if self.top_msg_id is None or self.top_msg_id is False else 1)),
self.peer._bytes(),
b'' if self.top_msg_id is None or self.top_msg_id is False else (struct.pack('<i', self.top_msg_id)),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_peer = reader.tgread_object()
if flags & 1:
_top_msg_id = reader.read_int()
else:
_top_msg_id = None
return cls(peer=_peer, top_msg_id=_top_msg_id)
class ReceivedMessagesRequest(TLRequest):
CONSTRUCTOR_ID = 0x5a954c0
SUBCLASS_OF_ID = 0x8565f897
def __init__(self, max_id: int):
"""
:returns Vector<ReceivedNotifyMessage>: This type has no constructors.
"""
self.max_id = max_id
def to_dict(self):
return {
'_': 'ReceivedMessagesRequest',
'max_id': self.max_id
}
def _bytes(self):
return b''.join((
b'\xc0T\xa9\x05',
struct.pack('<i', self.max_id),
))
@classmethod
def from_reader(cls, reader):
_max_id = reader.read_int()
return cls(max_id=_max_id)
class ReceivedQueueRequest(TLRequest):
CONSTRUCTOR_ID = 0x55a5bb66
SUBCLASS_OF_ID = 0x8918e168
def __init__(self, max_qts: int):
"""
:returns Vector<long>: This type has no constructors.
"""
self.max_qts = max_qts
def to_dict(self):
return {
'_': 'ReceivedQueueRequest',
'max_qts': self.max_qts
}
def _bytes(self):
return b''.join((
b'f\xbb\xa5U',
struct.pack('<i', self.max_qts),
))
@classmethod
def from_reader(cls, reader):
_max_qts = reader.read_int()
return cls(max_qts=_max_qts)
@staticmethod
def read_result(reader):
reader.read_int() # Vector ID
return [reader.read_long() for _ in range(reader.read_int())]
class ReorderPinnedDialogsRequest(TLRequest):
CONSTRUCTOR_ID = 0x3b1adf37
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, folder_id: int, order: List['TypeInputDialogPeer'], force: Optional[bool]=None):
"""
:returns Bool: This type has no constructors.
"""
self.folder_id = folder_id
self.order = order
self.force = force
async def resolve(self, client, utils):
_tmp = []
for _x in self.order:
_tmp.append(await client._get_input_dialog(_x))
self.order = _tmp
def to_dict(self):
return {
'_': 'ReorderPinnedDialogsRequest',
'folder_id': self.folder_id,
'order': [] if self.order is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.order],
'force': self.force
}
def _bytes(self):
return b''.join((
b'7\xdf\x1a;',
struct.pack('<I', (0 if self.force is None or self.force is False else 1)),
struct.pack('<i', self.folder_id),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.order)),b''.join(x._bytes() for x in self.order),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_force = bool(flags & 1)
_folder_id = reader.read_int()
reader.read_int()
_order = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_order.append(_x)
return cls(folder_id=_folder_id, order=_order, force=_force)
class ReorderStickerSetsRequest(TLRequest):
CONSTRUCTOR_ID = 0x78337739
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, order: List[int], masks: Optional[bool]=None, emojis: Optional[bool]=None):
"""
:returns Bool: This type has no constructors.
"""
self.order = order
self.masks = masks
self.emojis = emojis
def to_dict(self):
return {
'_': 'ReorderStickerSetsRequest',
'order': [] if self.order is None else self.order[:],
'masks': self.masks,
'emojis': self.emojis
}
def _bytes(self):
return b''.join((
b'9w3x',
struct.pack('<I', (0 if self.masks is None or self.masks is False else 1) | (0 if self.emojis is None or self.emojis is False else 2)),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.order)),b''.join(struct.pack('<q', x) for x in self.order),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_masks = bool(flags & 1)
_emojis = bool(flags & 2)
reader.read_int()
_order = []
for _ in range(reader.read_int()):
_x = reader.read_long()
_order.append(_x)
return cls(order=_order, masks=_masks, emojis=_emojis)
class ReportRequest(TLRequest):
CONSTRUCTOR_ID = 0x8953ab4e
SUBCLASS_OF_ID = 0xf5b399ac
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', id: List[int], reason: 'TypeReportReason', message: str):
"""
:returns Bool: This type has no constructors.
"""
self.peer = peer
self.id = id
self.reason = reason
self.message = message
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'ReportRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'id': [] if self.id is None else self.id[:],
'reason': self.reason.to_dict() if isinstance(self.reason, TLObject) else self.reason,
'message': self.message
}
def _bytes(self):
return b''.join((
b'N\xabS\x89',
self.peer._bytes(),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<i', x) for x in self.id),
self.reason._bytes(),
self.serialize_bytes(self.message),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
reader.read_int()
_id = []
for _ in range(reader.read_int()):
_x = reader.read_int()
_id.append(_x)
_reason = reader.tgread_object()
_message = reader.tgread_string()
return cls(peer=_peer, id=_id, reason=_reason, message=_message)
class ReportEncryptedSpamRequest(TLRequest):
CONSTRUCTOR_ID = 0x4b0c8c0f
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, peer: 'TypeInputEncryptedChat'):
"""
:returns Bool: This type has no constructors.
"""
self.peer = peer
def to_dict(self):
return {
'_': 'ReportEncryptedSpamRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer
}
def _bytes(self):
return b''.join((
b'\x0f\x8c\x0cK',
self.peer._bytes(),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
return cls(peer=_peer)
class ReportReactionRequest(TLRequest):
CONSTRUCTOR_ID = 0x3f64c076
SUBCLASS_OF_ID = 0xf5b399ac
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', id: int, reaction_peer: 'TypeInputPeer'):
"""
:returns Bool: This type has no constructors.
"""
self.peer = peer
self.id = id
self.reaction_peer = reaction_peer
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
self.reaction_peer = utils.get_input_peer(await client.get_input_entity(self.reaction_peer))
def to_dict(self):
return {
'_': 'ReportReactionRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'id': self.id,
'reaction_peer': self.reaction_peer.to_dict() if isinstance(self.reaction_peer, TLObject) else self.reaction_peer
}
def _bytes(self):
return b''.join((
b'v\xc0d?',
self.peer._bytes(),
struct.pack('<i', self.id),
self.reaction_peer._bytes(),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_id = reader.read_int()
_reaction_peer = reader.tgread_object()
return cls(peer=_peer, id=_id, reaction_peer=_reaction_peer)
class ReportSpamRequest(TLRequest):
CONSTRUCTOR_ID = 0xcf1592db
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, peer: 'TypeInputPeer'):
"""
:returns Bool: This type has no constructors.
"""
self.peer = peer
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'ReportSpamRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer
}
def _bytes(self):
return b''.join((
b'\xdb\x92\x15\xcf',
self.peer._bytes(),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
return cls(peer=_peer)
class RequestAppWebViewRequest(TLRequest):
CONSTRUCTOR_ID = 0x8c5a3b3c
SUBCLASS_OF_ID = 0x1c24a413
def __init__(self, peer: 'TypeInputPeer', app: 'TypeInputBotApp', platform: str, write_allowed: Optional[bool]=None, start_param: Optional[str]=None, theme_params: Optional['TypeDataJSON']=None):
"""
:returns AppWebViewResult: Instance of AppWebViewResultUrl.
"""
self.peer = peer
self.app = app
self.platform = platform
self.write_allowed = write_allowed
self.start_param = start_param
self.theme_params = theme_params
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'RequestAppWebViewRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'app': self.app.to_dict() if isinstance(self.app, TLObject) else self.app,
'platform': self.platform,
'write_allowed': self.write_allowed,
'start_param': self.start_param,
'theme_params': self.theme_params.to_dict() if isinstance(self.theme_params, TLObject) else self.theme_params
}
def _bytes(self):
return b''.join((
b'<;Z\x8c',
struct.pack('<I', (0 if self.write_allowed is None or self.write_allowed is False else 1) | (0 if self.start_param is None or self.start_param is False else 2) | (0 if self.theme_params is None or self.theme_params is False else 4)),
self.peer._bytes(),
self.app._bytes(),
b'' if self.start_param is None or self.start_param is False else (self.serialize_bytes(self.start_param)),
b'' if self.theme_params is None or self.theme_params is False else (self.theme_params._bytes()),
self.serialize_bytes(self.platform),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_write_allowed = bool(flags & 1)
_peer = reader.tgread_object()
_app = reader.tgread_object()
if flags & 2:
_start_param = reader.tgread_string()
else:
_start_param = None
if flags & 4:
_theme_params = reader.tgread_object()
else:
_theme_params = None
_platform = reader.tgread_string()
return cls(peer=_peer, app=_app, platform=_platform, write_allowed=_write_allowed, start_param=_start_param, theme_params=_theme_params)
class RequestEncryptionRequest(TLRequest):
CONSTRUCTOR_ID = 0xf64daf43
SUBCLASS_OF_ID = 0x6d28a37a
def __init__(self, user_id: 'TypeInputUser', g_a: bytes, random_id: int=None):
"""
:returns EncryptedChat: Instance of either EncryptedChatEmpty, EncryptedChatWaiting, EncryptedChatRequested, EncryptedChat, EncryptedChatDiscarded.
"""
self.user_id = user_id
self.g_a = g_a
self.random_id = random_id if random_id is not None else int.from_bytes(os.urandom(4), 'big', signed=True)
async def resolve(self, client, utils):
self.user_id = utils.get_input_user(await client.get_input_entity(self.user_id))
def to_dict(self):
return {
'_': 'RequestEncryptionRequest',
'user_id': self.user_id.to_dict() if isinstance(self.user_id, TLObject) else self.user_id,
'g_a': self.g_a,
'random_id': self.random_id
}
def _bytes(self):
return b''.join((
b'C\xafM\xf6',
self.user_id._bytes(),
struct.pack('<i', self.random_id),
self.serialize_bytes(self.g_a),
))
@classmethod
def from_reader(cls, reader):
_user_id = reader.tgread_object()
_random_id = reader.read_int()
_g_a = reader.tgread_bytes()
return cls(user_id=_user_id, g_a=_g_a, random_id=_random_id)
class RequestSimpleWebViewRequest(TLRequest):
CONSTRUCTOR_ID = 0x299bec8e
SUBCLASS_OF_ID = 0x15eee3db
def __init__(self, bot: 'TypeInputUser', url: str, platform: str, from_switch_webview: Optional[bool]=None, theme_params: Optional['TypeDataJSON']=None):
"""
:returns SimpleWebViewResult: Instance of SimpleWebViewResultUrl.
"""
self.bot = bot
self.url = url
self.platform = platform
self.from_switch_webview = from_switch_webview
self.theme_params = theme_params
async def resolve(self, client, utils):
self.bot = utils.get_input_user(await client.get_input_entity(self.bot))
def to_dict(self):
return {
'_': 'RequestSimpleWebViewRequest',
'bot': self.bot.to_dict() if isinstance(self.bot, TLObject) else self.bot,
'url': self.url,
'platform': self.platform,
'from_switch_webview': self.from_switch_webview,
'theme_params': self.theme_params.to_dict() if isinstance(self.theme_params, TLObject) else self.theme_params
}
def _bytes(self):
return b''.join((
b'\x8e\xec\x9b)',
struct.pack('<I', (0 if self.from_switch_webview is None or self.from_switch_webview is False else 2) | (0 if self.theme_params is None or self.theme_params is False else 1)),
self.bot._bytes(),
self.serialize_bytes(self.url),
b'' if self.theme_params is None or self.theme_params is False else (self.theme_params._bytes()),
self.serialize_bytes(self.platform),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_from_switch_webview = bool(flags & 2)
_bot = reader.tgread_object()
_url = reader.tgread_string()
if flags & 1:
_theme_params = reader.tgread_object()
else:
_theme_params = None
_platform = reader.tgread_string()
return cls(bot=_bot, url=_url, platform=_platform, from_switch_webview=_from_switch_webview, theme_params=_theme_params)
class RequestUrlAuthRequest(TLRequest):
CONSTRUCTOR_ID = 0x198fb446
SUBCLASS_OF_ID = 0x7765cb1e
def __init__(self, peer: Optional['TypeInputPeer']=None, msg_id: Optional[int]=None, button_id: Optional[int]=None, url: Optional[str]=None):
"""
:returns UrlAuthResult: Instance of either UrlAuthResultRequest, UrlAuthResultAccepted, UrlAuthResultDefault.
"""
self.peer = peer
self.msg_id = msg_id
self.button_id = button_id
self.url = url
async def resolve(self, client, utils):
if self.peer:
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'RequestUrlAuthRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'msg_id': self.msg_id,
'button_id': self.button_id,
'url': self.url
}
def _bytes(self):
assert ((self.peer or self.peer is not None) and (self.msg_id or self.msg_id is not None) and (self.button_id or self.button_id is not None)) or ((self.peer is None or self.peer is False) and (self.msg_id is None or self.msg_id is False) and (self.button_id is None or self.button_id is False)), 'peer, msg_id, button_id parameters must all be False-y (like None) or all be True-y'
return b''.join((
b'F\xb4\x8f\x19',
struct.pack('<I', (0 if self.peer is None or self.peer is False else 2) | (0 if self.msg_id is None or self.msg_id is False else 2) | (0 if self.button_id is None or self.button_id is False else 2) | (0 if self.url is None or self.url is False else 4)),
b'' if self.peer is None or self.peer is False else (self.peer._bytes()),
b'' if self.msg_id is None or self.msg_id is False else (struct.pack('<i', self.msg_id)),
b'' if self.button_id is None or self.button_id is False else (struct.pack('<i', self.button_id)),
b'' if self.url is None or self.url is False else (self.serialize_bytes(self.url)),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
if flags & 2:
_peer = reader.tgread_object()
else:
_peer = None
if flags & 2:
_msg_id = reader.read_int()
else:
_msg_id = None
if flags & 2:
_button_id = reader.read_int()
else:
_button_id = None
if flags & 4:
_url = reader.tgread_string()
else:
_url = None
return cls(peer=_peer, msg_id=_msg_id, button_id=_button_id, url=_url)
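# Illustrative usage sketch (not part of the generated schema): the assert in
# _bytes() above requires peer, msg_id and button_id to be supplied together
# (authorizing a URL behind an inline keyboard button) or omitted together
# (authorizing a bare URL). Both call shapes are shown; `client` is an
# assumed connected client that accepts TLRequest instances.
async def _example_request_url_auth(client, peer=None, msg_id=None, button_id=None, url=None):
    if peer is not None:
        return await client(RequestUrlAuthRequest(peer=peer, msg_id=msg_id, button_id=button_id))
    return await client(RequestUrlAuthRequest(url=url))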
class RequestWebViewRequest(TLRequest):
CONSTRUCTOR_ID = 0x178b480b
SUBCLASS_OF_ID = 0x93cea746
def __init__(self, peer: 'TypeInputPeer', bot: 'TypeInputUser', platform: str, from_bot_menu: Optional[bool]=None, silent: Optional[bool]=None, url: Optional[str]=None, start_param: Optional[str]=None, theme_params: Optional['TypeDataJSON']=None, reply_to_msg_id: Optional[int]=None, top_msg_id: Optional[int]=None, send_as: Optional['TypeInputPeer']=None):
"""
:returns WebViewResult: Instance of WebViewResultUrl.
"""
self.peer = peer
self.bot = bot
self.platform = platform
self.from_bot_menu = from_bot_menu
self.silent = silent
self.url = url
self.start_param = start_param
self.theme_params = theme_params
self.reply_to_msg_id = reply_to_msg_id
self.top_msg_id = top_msg_id
self.send_as = send_as
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
self.bot = utils.get_input_user(await client.get_input_entity(self.bot))
if self.send_as:
self.send_as = utils.get_input_peer(await client.get_input_entity(self.send_as))
def to_dict(self):
return {
'_': 'RequestWebViewRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'bot': self.bot.to_dict() if isinstance(self.bot, TLObject) else self.bot,
'platform': self.platform,
'from_bot_menu': self.from_bot_menu,
'silent': self.silent,
'url': self.url,
'start_param': self.start_param,
'theme_params': self.theme_params.to_dict() if isinstance(self.theme_params, TLObject) else self.theme_params,
'reply_to_msg_id': self.reply_to_msg_id,
'top_msg_id': self.top_msg_id,
'send_as': self.send_as.to_dict() if isinstance(self.send_as, TLObject) else self.send_as
}
def _bytes(self):
return b''.join((
b'\x0bH\x8b\x17',
struct.pack('<I', (0 if self.from_bot_menu is None or self.from_bot_menu is False else 16) | (0 if self.silent is None or self.silent is False else 32) | (0 if self.url is None or self.url is False else 2) | (0 if self.start_param is None or self.start_param is False else 8) | (0 if self.theme_params is None or self.theme_params is False else 4) | (0 if self.reply_to_msg_id is None or self.reply_to_msg_id is False else 1) | (0 if self.top_msg_id is None or self.top_msg_id is False else 512) | (0 if self.send_as is None or self.send_as is False else 8192)),
self.peer._bytes(),
self.bot._bytes(),
b'' if self.url is None or self.url is False else (self.serialize_bytes(self.url)),
b'' if self.start_param is None or self.start_param is False else (self.serialize_bytes(self.start_param)),
b'' if self.theme_params is None or self.theme_params is False else (self.theme_params._bytes()),
self.serialize_bytes(self.platform),
b'' if self.reply_to_msg_id is None or self.reply_to_msg_id is False else (struct.pack('<i', self.reply_to_msg_id)),
b'' if self.top_msg_id is None or self.top_msg_id is False else (struct.pack('<i', self.top_msg_id)),
b'' if self.send_as is None or self.send_as is False else (self.send_as._bytes()),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_from_bot_menu = bool(flags & 16)
_silent = bool(flags & 32)
_peer = reader.tgread_object()
_bot = reader.tgread_object()
if flags & 2:
_url = reader.tgread_string()
else:
_url = None
if flags & 8:
_start_param = reader.tgread_string()
else:
_start_param = None
if flags & 4:
_theme_params = reader.tgread_object()
else:
_theme_params = None
_platform = reader.tgread_string()
if flags & 1:
_reply_to_msg_id = reader.read_int()
else:
_reply_to_msg_id = None
if flags & 512:
_top_msg_id = reader.read_int()
else:
_top_msg_id = None
if flags & 8192:
_send_as = reader.tgread_object()
else:
_send_as = None
return cls(peer=_peer, bot=_bot, platform=_platform, from_bot_menu=_from_bot_menu, silent=_silent, url=_url, start_param=_start_param, theme_params=_theme_params, reply_to_msg_id=_reply_to_msg_id, top_msg_id=_top_msg_id, send_as=_send_as)
class SaveDefaultSendAsRequest(TLRequest):
CONSTRUCTOR_ID = 0xccfddf96
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, peer: 'TypeInputPeer', send_as: 'TypeInputPeer'):
"""
:returns Bool: This type has no constructors.
"""
self.peer = peer
self.send_as = send_as
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
self.send_as = utils.get_input_peer(await client.get_input_entity(self.send_as))
def to_dict(self):
return {
'_': 'SaveDefaultSendAsRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'send_as': self.send_as.to_dict() if isinstance(self.send_as, TLObject) else self.send_as
}
def _bytes(self):
return b''.join((
b'\x96\xdf\xfd\xcc',
self.peer._bytes(),
self.send_as._bytes(),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_send_as = reader.tgread_object()
return cls(peer=_peer, send_as=_send_as)
class SaveDraftRequest(TLRequest):
CONSTRUCTOR_ID = 0xb4331e3f
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, peer: 'TypeInputPeer', message: str, no_webpage: Optional[bool]=None, reply_to_msg_id: Optional[int]=None, top_msg_id: Optional[int]=None, entities: Optional[List['TypeMessageEntity']]=None):
"""
:returns Bool: This type has no constructors.
"""
self.peer = peer
self.message = message
self.no_webpage = no_webpage
self.reply_to_msg_id = reply_to_msg_id
self.top_msg_id = top_msg_id
self.entities = entities
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'SaveDraftRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'message': self.message,
'no_webpage': self.no_webpage,
'reply_to_msg_id': self.reply_to_msg_id,
'top_msg_id': self.top_msg_id,
'entities': [] if self.entities is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.entities]
}
def _bytes(self):
return b''.join((
b'?\x1e3\xb4',
struct.pack('<I', (0 if self.no_webpage is None or self.no_webpage is False else 2) | (0 if self.reply_to_msg_id is None or self.reply_to_msg_id is False else 1) | (0 if self.top_msg_id is None or self.top_msg_id is False else 4) | (0 if self.entities is None or self.entities is False else 8)),
b'' if self.reply_to_msg_id is None or self.reply_to_msg_id is False else (struct.pack('<i', self.reply_to_msg_id)),
b'' if self.top_msg_id is None or self.top_msg_id is False else (struct.pack('<i', self.top_msg_id)),
self.peer._bytes(),
self.serialize_bytes(self.message),
b'' if self.entities is None or self.entities is False else b''.join((b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.entities)),b''.join(x._bytes() for x in self.entities))),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_no_webpage = bool(flags & 2)
if flags & 1:
_reply_to_msg_id = reader.read_int()
else:
_reply_to_msg_id = None
if flags & 4:
_top_msg_id = reader.read_int()
else:
_top_msg_id = None
_peer = reader.tgread_object()
_message = reader.tgread_string()
if flags & 8:
reader.read_int()
_entities = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_entities.append(_x)
else:
_entities = None
return cls(peer=_peer, message=_message, no_webpage=_no_webpage, reply_to_msg_id=_reply_to_msg_id, top_msg_id=_top_msg_id, entities=_entities)
class SaveGifRequest(TLRequest):
CONSTRUCTOR_ID = 0x327a30cb
SUBCLASS_OF_ID = 0xf5b399ac
# noinspection PyShadowingBuiltins
def __init__(self, id: 'TypeInputDocument', unsave: bool):
"""
:returns Bool: This type has no constructors.
"""
self.id = id
self.unsave = unsave
async def resolve(self, client, utils):
self.id = utils.get_input_document(self.id)
def to_dict(self):
return {
'_': 'SaveGifRequest',
'id': self.id.to_dict() if isinstance(self.id, TLObject) else self.id,
'unsave': self.unsave
}
def _bytes(self):
return b''.join((
b'\xcb0z2',
self.id._bytes(),
b'\xb5ur\x99' if self.unsave else b'7\x97y\xbc',
))
@classmethod
def from_reader(cls, reader):
_id = reader.tgread_object()
_unsave = reader.tgread_bool()
return cls(id=_id, unsave=_unsave)
class SaveRecentStickerRequest(TLRequest):
CONSTRUCTOR_ID = 0x392718f8
SUBCLASS_OF_ID = 0xf5b399ac
# noinspection PyShadowingBuiltins
def __init__(self, id: 'TypeInputDocument', unsave: bool, attached: Optional[bool]=None):
"""
:returns Bool: This type has no constructors.
"""
self.id = id
self.unsave = unsave
self.attached = attached
async def resolve(self, client, utils):
self.id = utils.get_input_document(self.id)
def to_dict(self):
return {
'_': 'SaveRecentStickerRequest',
'id': self.id.to_dict() if isinstance(self.id, TLObject) else self.id,
'unsave': self.unsave,
'attached': self.attached
}
def _bytes(self):
return b''.join((
b"\xf8\x18'9",
struct.pack('<I', (0 if self.attached is None or self.attached is False else 1)),
self.id._bytes(),
b'\xb5ur\x99' if self.unsave else b'7\x97y\xbc',
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_attached = bool(flags & 1)
_id = reader.tgread_object()
_unsave = reader.tgread_bool()
return cls(id=_id, unsave=_unsave, attached=_attached)
class SearchRequest(TLRequest):
CONSTRUCTOR_ID = 0xa0fda762
SUBCLASS_OF_ID = 0xd4b40b5e
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', q: str, filter: 'TypeMessagesFilter', min_date: Optional[datetime], max_date: Optional[datetime], offset_id: int, add_offset: int, limit: int, max_id: int, min_id: int, hash: int, from_id: Optional['TypeInputPeer']=None, top_msg_id: Optional[int]=None):
"""
:returns messages.Messages: Instance of either Messages, MessagesSlice, ChannelMessages, MessagesNotModified.
"""
self.peer = peer
self.q = q
self.filter = filter
self.min_date = min_date
self.max_date = max_date
self.offset_id = offset_id
self.add_offset = add_offset
self.limit = limit
self.max_id = max_id
self.min_id = min_id
self.hash = hash
self.from_id = from_id
self.top_msg_id = top_msg_id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
if self.from_id:
self.from_id = utils.get_input_peer(await client.get_input_entity(self.from_id))
def to_dict(self):
return {
'_': 'SearchRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'q': self.q,
'filter': self.filter.to_dict() if isinstance(self.filter, TLObject) else self.filter,
'min_date': self.min_date,
'max_date': self.max_date,
'offset_id': self.offset_id,
'add_offset': self.add_offset,
'limit': self.limit,
'max_id': self.max_id,
'min_id': self.min_id,
'hash': self.hash,
'from_id': self.from_id.to_dict() if isinstance(self.from_id, TLObject) else self.from_id,
'top_msg_id': self.top_msg_id
}
def _bytes(self):
return b''.join((
b'b\xa7\xfd\xa0',
struct.pack('<I', (0 if self.from_id is None or self.from_id is False else 1) | (0 if self.top_msg_id is None or self.top_msg_id is False else 2)),
self.peer._bytes(),
self.serialize_bytes(self.q),
b'' if self.from_id is None or self.from_id is False else (self.from_id._bytes()),
b'' if self.top_msg_id is None or self.top_msg_id is False else (struct.pack('<i', self.top_msg_id)),
self.filter._bytes(),
self.serialize_datetime(self.min_date),
self.serialize_datetime(self.max_date),
struct.pack('<i', self.offset_id),
struct.pack('<i', self.add_offset),
struct.pack('<i', self.limit),
struct.pack('<i', self.max_id),
struct.pack('<i', self.min_id),
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_peer = reader.tgread_object()
_q = reader.tgread_string()
if flags & 1:
_from_id = reader.tgread_object()
else:
_from_id = None
if flags & 2:
_top_msg_id = reader.read_int()
else:
_top_msg_id = None
_filter = reader.tgread_object()
_min_date = reader.tgread_date()
_max_date = reader.tgread_date()
_offset_id = reader.read_int()
_add_offset = reader.read_int()
_limit = reader.read_int()
_max_id = reader.read_int()
_min_id = reader.read_int()
_hash = reader.read_long()
return cls(peer=_peer, q=_q, filter=_filter, min_date=_min_date, max_date=_max_date, offset_id=_offset_id, add_offset=_add_offset, limit=_limit, max_id=_max_id, min_id=_min_id, hash=_hash, from_id=_from_id, top_msg_id=_top_msg_id)
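# Illustrative usage sketch (not part of the generated schema): searching a
# single peer's history. `message_filter` stands for any TypeMessagesFilter
# constructor; the zero/None values below leave the date and message-id
# windows unrestricted, and hash=0 skips the cached-result optimisation.
async def _example_search(client, peer, query, message_filter, limit=50):
    return await client(SearchRequest(
        peer=peer, q=query, filter=message_filter,
        min_date=None, max_date=None,
        offset_id=0, add_offset=0, limit=limit,
        max_id=0, min_id=0, hash=0,
    ))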
class SearchCustomEmojiRequest(TLRequest):
CONSTRUCTOR_ID = 0x2c11c0d7
SUBCLASS_OF_ID = 0xbcef6aba
# noinspection PyShadowingBuiltins
def __init__(self, emoticon: str, hash: int):
"""
:returns EmojiList: Instance of either EmojiListNotModified, EmojiList.
"""
self.emoticon = emoticon
self.hash = hash
def to_dict(self):
return {
'_': 'SearchCustomEmojiRequest',
'emoticon': self.emoticon,
'hash': self.hash
}
def _bytes(self):
return b''.join((
b'\xd7\xc0\x11,',
self.serialize_bytes(self.emoticon),
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
_emoticon = reader.tgread_string()
_hash = reader.read_long()
return cls(emoticon=_emoticon, hash=_hash)
class SearchGlobalRequest(TLRequest):
CONSTRUCTOR_ID = 0x4bc6589a
SUBCLASS_OF_ID = 0xd4b40b5e
# noinspection PyShadowingBuiltins
def __init__(self, q: str, filter: 'TypeMessagesFilter', min_date: Optional[datetime], max_date: Optional[datetime], offset_rate: int, offset_peer: 'TypeInputPeer', offset_id: int, limit: int, folder_id: Optional[int]=None):
"""
:returns messages.Messages: Instance of either Messages, MessagesSlice, ChannelMessages, MessagesNotModified.
"""
self.q = q
self.filter = filter
self.min_date = min_date
self.max_date = max_date
self.offset_rate = offset_rate
self.offset_peer = offset_peer
self.offset_id = offset_id
self.limit = limit
self.folder_id = folder_id
async def resolve(self, client, utils):
self.offset_peer = utils.get_input_peer(await client.get_input_entity(self.offset_peer))
def to_dict(self):
return {
'_': 'SearchGlobalRequest',
'q': self.q,
'filter': self.filter.to_dict() if isinstance(self.filter, TLObject) else self.filter,
'min_date': self.min_date,
'max_date': self.max_date,
'offset_rate': self.offset_rate,
'offset_peer': self.offset_peer.to_dict() if isinstance(self.offset_peer, TLObject) else self.offset_peer,
'offset_id': self.offset_id,
'limit': self.limit,
'folder_id': self.folder_id
}
def _bytes(self):
return b''.join((
b'\x9aX\xc6K',
struct.pack('<I', (0 if self.folder_id is None or self.folder_id is False else 1)),
b'' if self.folder_id is None or self.folder_id is False else (struct.pack('<i', self.folder_id)),
self.serialize_bytes(self.q),
self.filter._bytes(),
self.serialize_datetime(self.min_date),
self.serialize_datetime(self.max_date),
struct.pack('<i', self.offset_rate),
self.offset_peer._bytes(),
struct.pack('<i', self.offset_id),
struct.pack('<i', self.limit),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
if flags & 1:
_folder_id = reader.read_int()
else:
_folder_id = None
_q = reader.tgread_string()
_filter = reader.tgread_object()
_min_date = reader.tgread_date()
_max_date = reader.tgread_date()
_offset_rate = reader.read_int()
_offset_peer = reader.tgread_object()
_offset_id = reader.read_int()
_limit = reader.read_int()
return cls(q=_q, filter=_filter, min_date=_min_date, max_date=_max_date, offset_rate=_offset_rate, offset_peer=_offset_peer, offset_id=_offset_id, limit=_limit, folder_id=_folder_id)
class SearchSentMediaRequest(TLRequest):
CONSTRUCTOR_ID = 0x107e31a0
SUBCLASS_OF_ID = 0xd4b40b5e
# noinspection PyShadowingBuiltins
def __init__(self, q: str, filter: 'TypeMessagesFilter', limit: int):
"""
:returns messages.Messages: Instance of either Messages, MessagesSlice, ChannelMessages, MessagesNotModified.
"""
self.q = q
self.filter = filter
self.limit = limit
def to_dict(self):
return {
'_': 'SearchSentMediaRequest',
'q': self.q,
'filter': self.filter.to_dict() if isinstance(self.filter, TLObject) else self.filter,
'limit': self.limit
}
def _bytes(self):
return b''.join((
b'\xa01~\x10',
self.serialize_bytes(self.q),
self.filter._bytes(),
struct.pack('<i', self.limit),
))
@classmethod
def from_reader(cls, reader):
_q = reader.tgread_string()
_filter = reader.tgread_object()
_limit = reader.read_int()
return cls(q=_q, filter=_filter, limit=_limit)
class SearchStickerSetsRequest(TLRequest):
CONSTRUCTOR_ID = 0x35705b8a
SUBCLASS_OF_ID = 0x40df361
# noinspection PyShadowingBuiltins
def __init__(self, q: str, hash: int, exclude_featured: Optional[bool]=None):
"""
:returns messages.FoundStickerSets: Instance of either FoundStickerSetsNotModified, FoundStickerSets.
"""
self.q = q
self.hash = hash
self.exclude_featured = exclude_featured
def to_dict(self):
return {
'_': 'SearchStickerSetsRequest',
'q': self.q,
'hash': self.hash,
'exclude_featured': self.exclude_featured
}
def _bytes(self):
return b''.join((
b'\x8a[p5',
struct.pack('<I', (0 if self.exclude_featured is None or self.exclude_featured is False else 1)),
self.serialize_bytes(self.q),
struct.pack('<q', self.hash),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_exclude_featured = bool(flags & 1)
_q = reader.tgread_string()
_hash = reader.read_long()
return cls(q=_q, hash=_hash, exclude_featured=_exclude_featured)
class SendBotRequestedPeerRequest(TLRequest):
CONSTRUCTOR_ID = 0xfe38d01b
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, peer: 'TypeInputPeer', msg_id: int, button_id: int, requested_peer: 'TypeInputPeer'):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.msg_id = msg_id
self.button_id = button_id
self.requested_peer = requested_peer
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
self.requested_peer = utils.get_input_peer(await client.get_input_entity(self.requested_peer))
def to_dict(self):
return {
'_': 'SendBotRequestedPeerRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'msg_id': self.msg_id,
'button_id': self.button_id,
'requested_peer': self.requested_peer.to_dict() if isinstance(self.requested_peer, TLObject) else self.requested_peer
}
def _bytes(self):
return b''.join((
b'\x1b\xd08\xfe',
self.peer._bytes(),
struct.pack('<i', self.msg_id),
struct.pack('<i', self.button_id),
self.requested_peer._bytes(),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_msg_id = reader.read_int()
_button_id = reader.read_int()
_requested_peer = reader.tgread_object()
return cls(peer=_peer, msg_id=_msg_id, button_id=_button_id, requested_peer=_requested_peer)
class SendEncryptedRequest(TLRequest):
CONSTRUCTOR_ID = 0x44fa7a15
SUBCLASS_OF_ID = 0xc99e3e50
def __init__(self, peer: 'TypeInputEncryptedChat', data: bytes, silent: Optional[bool]=None, random_id: int=None):
"""
:returns messages.SentEncryptedMessage: Instance of either SentEncryptedMessage, SentEncryptedFile.
"""
self.peer = peer
self.data = data
self.silent = silent
self.random_id = random_id if random_id is not None else int.from_bytes(os.urandom(8), 'big', signed=True)
def to_dict(self):
return {
'_': 'SendEncryptedRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'data': self.data,
'silent': self.silent,
'random_id': self.random_id
}
def _bytes(self):
return b''.join((
b'\x15z\xfaD',
struct.pack('<I', (0 if self.silent is None or self.silent is False else 1)),
self.peer._bytes(),
struct.pack('<q', self.random_id),
self.serialize_bytes(self.data),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_silent = bool(flags & 1)
_peer = reader.tgread_object()
_random_id = reader.read_long()
_data = reader.tgread_bytes()
return cls(peer=_peer, data=_data, silent=_silent, random_id=_random_id)
class SendEncryptedFileRequest(TLRequest):
CONSTRUCTOR_ID = 0x5559481d
SUBCLASS_OF_ID = 0xc99e3e50
def __init__(self, peer: 'TypeInputEncryptedChat', data: bytes, file: 'TypeInputEncryptedFile', silent: Optional[bool]=None, random_id: int=None):
"""
:returns messages.SentEncryptedMessage: Instance of either SentEncryptedMessage, SentEncryptedFile.
"""
self.peer = peer
self.data = data
self.file = file
self.silent = silent
self.random_id = random_id if random_id is not None else int.from_bytes(os.urandom(8), 'big', signed=True)
def to_dict(self):
return {
'_': 'SendEncryptedFileRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'data': self.data,
'file': self.file.to_dict() if isinstance(self.file, TLObject) else self.file,
'silent': self.silent,
'random_id': self.random_id
}
def _bytes(self):
return b''.join((
b'\x1dHYU',
struct.pack('<I', (0 if self.silent is None or self.silent is False else 1)),
self.peer._bytes(),
struct.pack('<q', self.random_id),
self.serialize_bytes(self.data),
self.file._bytes(),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_silent = bool(flags & 1)
_peer = reader.tgread_object()
_random_id = reader.read_long()
_data = reader.tgread_bytes()
_file = reader.tgread_object()
return cls(peer=_peer, data=_data, file=_file, silent=_silent, random_id=_random_id)
class SendEncryptedServiceRequest(TLRequest):
CONSTRUCTOR_ID = 0x32d439a4
SUBCLASS_OF_ID = 0xc99e3e50
def __init__(self, peer: 'TypeInputEncryptedChat', data: bytes, random_id: int=None):
"""
:returns messages.SentEncryptedMessage: Instance of either SentEncryptedMessage, SentEncryptedFile.
"""
self.peer = peer
self.data = data
self.random_id = random_id if random_id is not None else int.from_bytes(os.urandom(8), 'big', signed=True)
def to_dict(self):
return {
'_': 'SendEncryptedServiceRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'data': self.data,
'random_id': self.random_id
}
def _bytes(self):
return b''.join((
b'\xa49\xd42',
self.peer._bytes(),
struct.pack('<q', self.random_id),
self.serialize_bytes(self.data),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_random_id = reader.read_long()
_data = reader.tgread_bytes()
return cls(peer=_peer, data=_data, random_id=_random_id)
class SendInlineBotResultRequest(TLRequest):
CONSTRUCTOR_ID = 0xd3fbdccb
SUBCLASS_OF_ID = 0x8af52aac
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', query_id: int, id: str, silent: Optional[bool]=None, background: Optional[bool]=None, clear_draft: Optional[bool]=None, hide_via: Optional[bool]=None, reply_to_msg_id: Optional[int]=None, top_msg_id: Optional[int]=None, random_id: int=None, schedule_date: Optional[datetime]=None, send_as: Optional['TypeInputPeer']=None):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.query_id = query_id
self.id = id
self.silent = silent
self.background = background
self.clear_draft = clear_draft
self.hide_via = hide_via
self.reply_to_msg_id = reply_to_msg_id
self.top_msg_id = top_msg_id
self.random_id = random_id if random_id is not None else int.from_bytes(os.urandom(8), 'big', signed=True)
self.schedule_date = schedule_date
self.send_as = send_as
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
if self.send_as:
self.send_as = utils.get_input_peer(await client.get_input_entity(self.send_as))
def to_dict(self):
return {
'_': 'SendInlineBotResultRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'query_id': self.query_id,
'id': self.id,
'silent': self.silent,
'background': self.background,
'clear_draft': self.clear_draft,
'hide_via': self.hide_via,
'reply_to_msg_id': self.reply_to_msg_id,
'top_msg_id': self.top_msg_id,
'random_id': self.random_id,
'schedule_date': self.schedule_date,
'send_as': self.send_as.to_dict() if isinstance(self.send_as, TLObject) else self.send_as
}
def _bytes(self):
return b''.join((
b'\xcb\xdc\xfb\xd3',
struct.pack('<I', (0 if self.silent is None or self.silent is False else 32) | (0 if self.background is None or self.background is False else 64) | (0 if self.clear_draft is None or self.clear_draft is False else 128) | (0 if self.hide_via is None or self.hide_via is False else 2048) | (0 if self.reply_to_msg_id is None or self.reply_to_msg_id is False else 1) | (0 if self.top_msg_id is None or self.top_msg_id is False else 512) | (0 if self.schedule_date is None or self.schedule_date is False else 1024) | (0 if self.send_as is None or self.send_as is False else 8192)),
self.peer._bytes(),
b'' if self.reply_to_msg_id is None or self.reply_to_msg_id is False else (struct.pack('<i', self.reply_to_msg_id)),
b'' if self.top_msg_id is None or self.top_msg_id is False else (struct.pack('<i', self.top_msg_id)),
struct.pack('<q', self.random_id),
struct.pack('<q', self.query_id),
self.serialize_bytes(self.id),
b'' if self.schedule_date is None or self.schedule_date is False else (self.serialize_datetime(self.schedule_date)),
b'' if self.send_as is None or self.send_as is False else (self.send_as._bytes()),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_silent = bool(flags & 32)
_background = bool(flags & 64)
_clear_draft = bool(flags & 128)
_hide_via = bool(flags & 2048)
_peer = reader.tgread_object()
if flags & 1:
_reply_to_msg_id = reader.read_int()
else:
_reply_to_msg_id = None
if flags & 512:
_top_msg_id = reader.read_int()
else:
_top_msg_id = None
_random_id = reader.read_long()
_query_id = reader.read_long()
_id = reader.tgread_string()
if flags & 1024:
_schedule_date = reader.tgread_date()
else:
_schedule_date = None
if flags & 8192:
_send_as = reader.tgread_object()
else:
_send_as = None
return cls(peer=_peer, query_id=_query_id, id=_id, silent=_silent, background=_background, clear_draft=_clear_draft, hide_via=_hide_via, reply_to_msg_id=_reply_to_msg_id, top_msg_id=_top_msg_id, random_id=_random_id, schedule_date=_schedule_date, send_as=_send_as)
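# Usage sketch (not part of the generated schema; assumes a connected
# Telethon-style client and a prior GetInlineBotResultsRequest call whose
# response is bound to `results`):
#
#     await client(SendInlineBotResultRequest(
#         peer='me',
#         query_id=results.query_id,
#         id=results.results[0].id,
#         clear_draft=True,
#     ))
#
# peer is converted to an InputPeer by resolve() before serialization.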
class SendMediaRequest(TLRequest):
CONSTRUCTOR_ID = 0x7547c966
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, peer: 'TypeInputPeer', media: 'TypeInputMedia', message: str, silent: Optional[bool]=None, background: Optional[bool]=None, clear_draft: Optional[bool]=None, noforwards: Optional[bool]=None, update_stickersets_order: Optional[bool]=None, reply_to_msg_id: Optional[int]=None, top_msg_id: Optional[int]=None, random_id: int=None, reply_markup: Optional['TypeReplyMarkup']=None, entities: Optional[List['TypeMessageEntity']]=None, schedule_date: Optional[datetime]=None, send_as: Optional['TypeInputPeer']=None):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.media = media
self.message = message
self.silent = silent
self.background = background
self.clear_draft = clear_draft
self.noforwards = noforwards
self.update_stickersets_order = update_stickersets_order
self.reply_to_msg_id = reply_to_msg_id
self.top_msg_id = top_msg_id
self.random_id = random_id if random_id is not None else int.from_bytes(os.urandom(8), 'big', signed=True)
self.reply_markup = reply_markup
self.entities = entities
self.schedule_date = schedule_date
self.send_as = send_as
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
self.media = utils.get_input_media(self.media)
if self.send_as:
self.send_as = utils.get_input_peer(await client.get_input_entity(self.send_as))
def to_dict(self):
return {
'_': 'SendMediaRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'media': self.media.to_dict() if isinstance(self.media, TLObject) else self.media,
'message': self.message,
'silent': self.silent,
'background': self.background,
'clear_draft': self.clear_draft,
'noforwards': self.noforwards,
'update_stickersets_order': self.update_stickersets_order,
'reply_to_msg_id': self.reply_to_msg_id,
'top_msg_id': self.top_msg_id,
'random_id': self.random_id,
'reply_markup': self.reply_markup.to_dict() if isinstance(self.reply_markup, TLObject) else self.reply_markup,
'entities': [] if self.entities is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.entities],
'schedule_date': self.schedule_date,
'send_as': self.send_as.to_dict() if isinstance(self.send_as, TLObject) else self.send_as
}
def _bytes(self):
return b''.join((
b'f\xc9Gu',
struct.pack('<I', (0 if self.silent is None or self.silent is False else 32) | (0 if self.background is None or self.background is False else 64) | (0 if self.clear_draft is None or self.clear_draft is False else 128) | (0 if self.noforwards is None or self.noforwards is False else 16384) | (0 if self.update_stickersets_order is None or self.update_stickersets_order is False else 32768) | (0 if self.reply_to_msg_id is None or self.reply_to_msg_id is False else 1) | (0 if self.top_msg_id is None or self.top_msg_id is False else 512) | (0 if self.reply_markup is None or self.reply_markup is False else 4) | (0 if self.entities is None or self.entities is False else 8) | (0 if self.schedule_date is None or self.schedule_date is False else 1024) | (0 if self.send_as is None or self.send_as is False else 8192)),
self.peer._bytes(),
b'' if self.reply_to_msg_id is None or self.reply_to_msg_id is False else (struct.pack('<i', self.reply_to_msg_id)),
b'' if self.top_msg_id is None or self.top_msg_id is False else (struct.pack('<i', self.top_msg_id)),
self.media._bytes(),
self.serialize_bytes(self.message),
struct.pack('<q', self.random_id),
b'' if self.reply_markup is None or self.reply_markup is False else (self.reply_markup._bytes()),
b'' if self.entities is None or self.entities is False else b''.join((b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.entities)),b''.join(x._bytes() for x in self.entities))),
b'' if self.schedule_date is None or self.schedule_date is False else (self.serialize_datetime(self.schedule_date)),
b'' if self.send_as is None or self.send_as is False else (self.send_as._bytes()),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_silent = bool(flags & 32)
_background = bool(flags & 64)
_clear_draft = bool(flags & 128)
_noforwards = bool(flags & 16384)
_update_stickersets_order = bool(flags & 32768)
_peer = reader.tgread_object()
if flags & 1:
_reply_to_msg_id = reader.read_int()
else:
_reply_to_msg_id = None
if flags & 512:
_top_msg_id = reader.read_int()
else:
_top_msg_id = None
_media = reader.tgread_object()
_message = reader.tgread_string()
_random_id = reader.read_long()
if flags & 4:
_reply_markup = reader.tgread_object()
else:
_reply_markup = None
if flags & 8:
reader.read_int()
_entities = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_entities.append(_x)
else:
_entities = None
if flags & 1024:
_schedule_date = reader.tgread_date()
else:
_schedule_date = None
if flags & 8192:
_send_as = reader.tgread_object()
else:
_send_as = None
return cls(peer=_peer, media=_media, message=_message, silent=_silent, background=_background, clear_draft=_clear_draft, noforwards=_noforwards, update_stickersets_order=_update_stickersets_order, reply_to_msg_id=_reply_to_msg_id, top_msg_id=_top_msg_id, random_id=_random_id, reply_markup=_reply_markup, entities=_entities, schedule_date=_schedule_date, send_as=_send_as)
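# Usage sketch (not part of the generated schema; assumes a connected
# Telethon-style client and an already-built TypeInputMedia value named
# `input_media`, e.g. an InputMediaPhotoExternal):
#
#     updates = await client(SendMediaRequest(
#         peer='username',
#         media=input_media,
#         message='caption text',
#         silent=True,
#     ))
#
# random_id defaults to a random signed 64-bit value when omitted, and
# resolve() converts peer into its Input* form and normalizes media.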
class SendMessageRequest(TLRequest):
CONSTRUCTOR_ID = 0x1cc20387
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, peer: 'TypeInputPeer', message: str, no_webpage: Optional[bool]=None, silent: Optional[bool]=None, background: Optional[bool]=None, clear_draft: Optional[bool]=None, noforwards: Optional[bool]=None, update_stickersets_order: Optional[bool]=None, reply_to_msg_id: Optional[int]=None, top_msg_id: Optional[int]=None, random_id: int=None, reply_markup: Optional['TypeReplyMarkup']=None, entities: Optional[List['TypeMessageEntity']]=None, schedule_date: Optional[datetime]=None, send_as: Optional['TypeInputPeer']=None):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.message = message
self.no_webpage = no_webpage
self.silent = silent
self.background = background
self.clear_draft = clear_draft
self.noforwards = noforwards
self.update_stickersets_order = update_stickersets_order
self.reply_to_msg_id = reply_to_msg_id
self.top_msg_id = top_msg_id
self.random_id = random_id if random_id is not None else int.from_bytes(os.urandom(8), 'big', signed=True)
self.reply_markup = reply_markup
self.entities = entities
self.schedule_date = schedule_date
self.send_as = send_as
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
if self.send_as:
self.send_as = utils.get_input_peer(await client.get_input_entity(self.send_as))
def to_dict(self):
return {
'_': 'SendMessageRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'message': self.message,
'no_webpage': self.no_webpage,
'silent': self.silent,
'background': self.background,
'clear_draft': self.clear_draft,
'noforwards': self.noforwards,
'update_stickersets_order': self.update_stickersets_order,
'reply_to_msg_id': self.reply_to_msg_id,
'top_msg_id': self.top_msg_id,
'random_id': self.random_id,
'reply_markup': self.reply_markup.to_dict() if isinstance(self.reply_markup, TLObject) else self.reply_markup,
'entities': [] if self.entities is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.entities],
'schedule_date': self.schedule_date,
'send_as': self.send_as.to_dict() if isinstance(self.send_as, TLObject) else self.send_as
}
def _bytes(self):
return b''.join((
b'\x87\x03\xc2\x1c',
struct.pack('<I', (0 if self.no_webpage is None or self.no_webpage is False else 2) | (0 if self.silent is None or self.silent is False else 32) | (0 if self.background is None or self.background is False else 64) | (0 if self.clear_draft is None or self.clear_draft is False else 128) | (0 if self.noforwards is None or self.noforwards is False else 16384) | (0 if self.update_stickersets_order is None or self.update_stickersets_order is False else 32768) | (0 if self.reply_to_msg_id is None or self.reply_to_msg_id is False else 1) | (0 if self.top_msg_id is None or self.top_msg_id is False else 512) | (0 if self.reply_markup is None or self.reply_markup is False else 4) | (0 if self.entities is None or self.entities is False else 8) | (0 if self.schedule_date is None or self.schedule_date is False else 1024) | (0 if self.send_as is None or self.send_as is False else 8192)),
self.peer._bytes(),
b'' if self.reply_to_msg_id is None or self.reply_to_msg_id is False else (struct.pack('<i', self.reply_to_msg_id)),
b'' if self.top_msg_id is None or self.top_msg_id is False else (struct.pack('<i', self.top_msg_id)),
self.serialize_bytes(self.message),
struct.pack('<q', self.random_id),
b'' if self.reply_markup is None or self.reply_markup is False else (self.reply_markup._bytes()),
b'' if self.entities is None or self.entities is False else b''.join((b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.entities)),b''.join(x._bytes() for x in self.entities))),
b'' if self.schedule_date is None or self.schedule_date is False else (self.serialize_datetime(self.schedule_date)),
b'' if self.send_as is None or self.send_as is False else (self.send_as._bytes()),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_no_webpage = bool(flags & 2)
_silent = bool(flags & 32)
_background = bool(flags & 64)
_clear_draft = bool(flags & 128)
_noforwards = bool(flags & 16384)
_update_stickersets_order = bool(flags & 32768)
_peer = reader.tgread_object()
if flags & 1:
_reply_to_msg_id = reader.read_int()
else:
_reply_to_msg_id = None
if flags & 512:
_top_msg_id = reader.read_int()
else:
_top_msg_id = None
_message = reader.tgread_string()
_random_id = reader.read_long()
if flags & 4:
_reply_markup = reader.tgread_object()
else:
_reply_markup = None
if flags & 8:
reader.read_int()
_entities = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_entities.append(_x)
else:
_entities = None
if flags & 1024:
_schedule_date = reader.tgread_date()
else:
_schedule_date = None
if flags & 8192:
_send_as = reader.tgread_object()
else:
_send_as = None
return cls(peer=_peer, message=_message, no_webpage=_no_webpage, silent=_silent, background=_background, clear_draft=_clear_draft, noforwards=_noforwards, update_stickersets_order=_update_stickersets_order, reply_to_msg_id=_reply_to_msg_id, top_msg_id=_top_msg_id, random_id=_random_id, reply_markup=_reply_markup, entities=_entities, schedule_date=_schedule_date, send_as=_send_as)
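# Usage sketch (not part of the generated schema; assumes a connected
# Telethon-style client that accepts TLRequest instances):
#
#     updates = await client(SendMessageRequest(
#         peer='me',
#         message='hello world',
#         no_webpage=True,
#     ))
#
# Flag arguments left as None are simply omitted from the serialized bytes;
# random_id is generated from os.urandom(8) when not supplied.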
class SendMultiMediaRequest(TLRequest):
CONSTRUCTOR_ID = 0xb6f11a1c
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, peer: 'TypeInputPeer', multi_media: List['TypeInputSingleMedia'], silent: Optional[bool]=None, background: Optional[bool]=None, clear_draft: Optional[bool]=None, noforwards: Optional[bool]=None, update_stickersets_order: Optional[bool]=None, reply_to_msg_id: Optional[int]=None, top_msg_id: Optional[int]=None, schedule_date: Optional[datetime]=None, send_as: Optional['TypeInputPeer']=None):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.multi_media = multi_media
self.silent = silent
self.background = background
self.clear_draft = clear_draft
self.noforwards = noforwards
self.update_stickersets_order = update_stickersets_order
self.reply_to_msg_id = reply_to_msg_id
self.top_msg_id = top_msg_id
self.schedule_date = schedule_date
self.send_as = send_as
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
if self.send_as:
self.send_as = utils.get_input_peer(await client.get_input_entity(self.send_as))
def to_dict(self):
return {
'_': 'SendMultiMediaRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'multi_media': [] if self.multi_media is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.multi_media],
'silent': self.silent,
'background': self.background,
'clear_draft': self.clear_draft,
'noforwards': self.noforwards,
'update_stickersets_order': self.update_stickersets_order,
'reply_to_msg_id': self.reply_to_msg_id,
'top_msg_id': self.top_msg_id,
'schedule_date': self.schedule_date,
'send_as': self.send_as.to_dict() if isinstance(self.send_as, TLObject) else self.send_as
}
def _bytes(self):
return b''.join((
b'\x1c\x1a\xf1\xb6',
struct.pack('<I', (0 if self.silent is None or self.silent is False else 32) | (0 if self.background is None or self.background is False else 64) | (0 if self.clear_draft is None or self.clear_draft is False else 128) | (0 if self.noforwards is None or self.noforwards is False else 16384) | (0 if self.update_stickersets_order is None or self.update_stickersets_order is False else 32768) | (0 if self.reply_to_msg_id is None or self.reply_to_msg_id is False else 1) | (0 if self.top_msg_id is None or self.top_msg_id is False else 512) | (0 if self.schedule_date is None or self.schedule_date is False else 1024) | (0 if self.send_as is None or self.send_as is False else 8192)),
self.peer._bytes(),
b'' if self.reply_to_msg_id is None or self.reply_to_msg_id is False else (struct.pack('<i', self.reply_to_msg_id)),
b'' if self.top_msg_id is None or self.top_msg_id is False else (struct.pack('<i', self.top_msg_id)),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.multi_media)),b''.join(x._bytes() for x in self.multi_media),
b'' if self.schedule_date is None or self.schedule_date is False else (self.serialize_datetime(self.schedule_date)),
b'' if self.send_as is None or self.send_as is False else (self.send_as._bytes()),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_silent = bool(flags & 32)
_background = bool(flags & 64)
_clear_draft = bool(flags & 128)
_noforwards = bool(flags & 16384)
_update_stickersets_order = bool(flags & 32768)
_peer = reader.tgread_object()
if flags & 1:
_reply_to_msg_id = reader.read_int()
else:
_reply_to_msg_id = None
if flags & 512:
_top_msg_id = reader.read_int()
else:
_top_msg_id = None
reader.read_int()
_multi_media = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_multi_media.append(_x)
if flags & 1024:
_schedule_date = reader.tgread_date()
else:
_schedule_date = None
if flags & 8192:
_send_as = reader.tgread_object()
else:
_send_as = None
return cls(peer=_peer, multi_media=_multi_media, silent=_silent, background=_background, clear_draft=_clear_draft, noforwards=_noforwards, update_stickersets_order=_update_stickersets_order, reply_to_msg_id=_reply_to_msg_id, top_msg_id=_top_msg_id, schedule_date=_schedule_date, send_as=_send_as)
class SendReactionRequest(TLRequest):
CONSTRUCTOR_ID = 0xd30d78d4
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, peer: 'TypeInputPeer', msg_id: int, big: Optional[bool]=None, add_to_recent: Optional[bool]=None, reaction: Optional[List['TypeReaction']]=None):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.msg_id = msg_id
self.big = big
self.add_to_recent = add_to_recent
self.reaction = reaction
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'SendReactionRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'msg_id': self.msg_id,
'big': self.big,
'add_to_recent': self.add_to_recent,
'reaction': [] if self.reaction is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.reaction]
}
def _bytes(self):
return b''.join((
b'\xd4x\r\xd3',
struct.pack('<I', (0 if self.big is None or self.big is False else 2) | (0 if self.add_to_recent is None or self.add_to_recent is False else 4) | (0 if self.reaction is None or self.reaction is False else 1)),
self.peer._bytes(),
struct.pack('<i', self.msg_id),
b'' if self.reaction is None or self.reaction is False else b''.join((b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.reaction)),b''.join(x._bytes() for x in self.reaction))),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_big = bool(flags & 2)
_add_to_recent = bool(flags & 4)
_peer = reader.tgread_object()
_msg_id = reader.read_int()
if flags & 1:
reader.read_int()
_reaction = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_reaction.append(_x)
else:
_reaction = None
return cls(peer=_peer, msg_id=_msg_id, big=_big, add_to_recent=_add_to_recent, reaction=_reaction)
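# Usage sketch (not part of the generated schema; ReactionEmoji is assumed to
# come from the generated types module):
#
#     await client(SendReactionRequest(
#         peer='username',
#         msg_id=12345,
#         reaction=[ReactionEmoji(emoticon='👍')],
#         add_to_recent=True,
#     ))
#
# Leaving reaction as None serializes an empty flag, which is how a previously
# sent reaction is withdrawn.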
class SendScheduledMessagesRequest(TLRequest):
CONSTRUCTOR_ID = 0xbd38850a
SUBCLASS_OF_ID = 0x8af52aac
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', id: List[int]):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.id = id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'SendScheduledMessagesRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'id': [] if self.id is None else self.id[:]
}
def _bytes(self):
return b''.join((
b'\n\x858\xbd',
self.peer._bytes(),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<i', x) for x in self.id),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
reader.read_int()
_id = []
for _ in range(reader.read_int()):
_x = reader.read_int()
_id.append(_x)
return cls(peer=_peer, id=_id)
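# Usage sketch (not part of the generated schema): id lists the message ids of
# already-scheduled messages in the given peer that should be sent immediately.
#
#     await client(SendScheduledMessagesRequest(peer='me', id=[12345]))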
class SendScreenshotNotificationRequest(TLRequest):
CONSTRUCTOR_ID = 0xc97df020
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, peer: 'TypeInputPeer', reply_to_msg_id: int, random_id: int=None):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.reply_to_msg_id = reply_to_msg_id
self.random_id = random_id if random_id is not None else int.from_bytes(os.urandom(8), 'big', signed=True)
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'SendScreenshotNotificationRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'reply_to_msg_id': self.reply_to_msg_id,
'random_id': self.random_id
}
def _bytes(self):
return b''.join((
b' \xf0}\xc9',
self.peer._bytes(),
struct.pack('<i', self.reply_to_msg_id),
struct.pack('<q', self.random_id),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_reply_to_msg_id = reader.read_int()
_random_id = reader.read_long()
return cls(peer=_peer, reply_to_msg_id=_reply_to_msg_id, random_id=_random_id)
class SendVoteRequest(TLRequest):
CONSTRUCTOR_ID = 0x10ea6184
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, peer: 'TypeInputPeer', msg_id: int, options: List[bytes]):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.msg_id = msg_id
self.options = options
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'SendVoteRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'msg_id': self.msg_id,
'options': [] if self.options is None else self.options[:]
}
def _bytes(self):
return b''.join((
b'\x84a\xea\x10',
self.peer._bytes(),
struct.pack('<i', self.msg_id),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.options)),b''.join(self.serialize_bytes(x) for x in self.options),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_msg_id = reader.read_int()
reader.read_int()
_options = []
for _ in range(reader.read_int()):
_x = reader.tgread_bytes()
_options.append(_x)
return cls(peer=_peer, msg_id=_msg_id, options=_options)
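# Usage sketch (not part of the generated schema): options carries the raw byte
# identifiers of the chosen poll answers, as found in PollAnswer.option of the
# target message's poll.
#
#     await client(SendVoteRequest(
#         peer='username',
#         msg_id=12345,
#         options=[b'0'],
#     ))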
class SendWebViewDataRequest(TLRequest):
CONSTRUCTOR_ID = 0xdc0242c8
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, bot: 'TypeInputUser', button_text: str, data: str, random_id: int=None):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.bot = bot
self.button_text = button_text
self.data = data
self.random_id = random_id if random_id is not None else int.from_bytes(os.urandom(8), 'big', signed=True)
async def resolve(self, client, utils):
self.bot = utils.get_input_user(await client.get_input_entity(self.bot))
def to_dict(self):
return {
'_': 'SendWebViewDataRequest',
'bot': self.bot.to_dict() if isinstance(self.bot, TLObject) else self.bot,
'button_text': self.button_text,
'data': self.data,
'random_id': self.random_id
}
def _bytes(self):
return b''.join((
b'\xc8B\x02\xdc',
self.bot._bytes(),
struct.pack('<q', self.random_id),
self.serialize_bytes(self.button_text),
self.serialize_bytes(self.data),
))
@classmethod
def from_reader(cls, reader):
_bot = reader.tgread_object()
_random_id = reader.read_long()
_button_text = reader.tgread_string()
_data = reader.tgread_string()
return cls(bot=_bot, button_text=_button_text, data=_data, random_id=_random_id)
class SendWebViewResultMessageRequest(TLRequest):
CONSTRUCTOR_ID = 0xa4314f5
SUBCLASS_OF_ID = 0x75e49312
def __init__(self, bot_query_id: str, result: 'TypeInputBotInlineResult'):
"""
:returns WebViewMessageSent: Instance of WebViewMessageSent.
"""
self.bot_query_id = bot_query_id
self.result = result
def to_dict(self):
return {
'_': 'SendWebViewResultMessageRequest',
'bot_query_id': self.bot_query_id,
'result': self.result.to_dict() if isinstance(self.result, TLObject) else self.result
}
def _bytes(self):
return b''.join((
b'\xf5\x14C\n',
self.serialize_bytes(self.bot_query_id),
self.result._bytes(),
))
@classmethod
def from_reader(cls, reader):
_bot_query_id = reader.tgread_string()
_result = reader.tgread_object()
return cls(bot_query_id=_bot_query_id, result=_result)
class SetBotCallbackAnswerRequest(TLRequest):
CONSTRUCTOR_ID = 0xd58f130a
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, query_id: int, cache_time: int, alert: Optional[bool]=None, message: Optional[str]=None, url: Optional[str]=None):
"""
:returns Bool: This type has no constructors.
"""
self.query_id = query_id
self.cache_time = cache_time
self.alert = alert
self.message = message
self.url = url
def to_dict(self):
return {
'_': 'SetBotCallbackAnswerRequest',
'query_id': self.query_id,
'cache_time': self.cache_time,
'alert': self.alert,
'message': self.message,
'url': self.url
}
def _bytes(self):
return b''.join((
b'\n\x13\x8f\xd5',
struct.pack('<I', (0 if self.alert is None or self.alert is False else 2) | (0 if self.message is None or self.message is False else 1) | (0 if self.url is None or self.url is False else 4)),
struct.pack('<q', self.query_id),
b'' if self.message is None or self.message is False else (self.serialize_bytes(self.message)),
b'' if self.url is None or self.url is False else (self.serialize_bytes(self.url)),
struct.pack('<i', self.cache_time),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_alert = bool(flags & 2)
_query_id = reader.read_long()
if flags & 1:
_message = reader.tgread_string()
else:
_message = None
if flags & 4:
_url = reader.tgread_string()
else:
_url = None
_cache_time = reader.read_int()
return cls(query_id=_query_id, cache_time=_cache_time, alert=_alert, message=_message, url=_url)
class SetBotPrecheckoutResultsRequest(TLRequest):
CONSTRUCTOR_ID = 0x9c2dd95
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, query_id: int, success: Optional[bool]=None, error: Optional[str]=None):
"""
:returns Bool: This type has no constructors.
"""
self.query_id = query_id
self.success = success
self.error = error
def to_dict(self):
return {
'_': 'SetBotPrecheckoutResultsRequest',
'query_id': self.query_id,
'success': self.success,
'error': self.error
}
def _bytes(self):
return b''.join((
b'\x95\xdd\xc2\t',
struct.pack('<I', (0 if self.success is None or self.success is False else 2) | (0 if self.error is None or self.error is False else 1)),
struct.pack('<q', self.query_id),
b'' if self.error is None or self.error is False else (self.serialize_bytes(self.error)),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_success = bool(flags & 2)
_query_id = reader.read_long()
if flags & 1:
_error = reader.tgread_string()
else:
_error = None
return cls(query_id=_query_id, success=_success, error=_error)
class SetBotShippingResultsRequest(TLRequest):
CONSTRUCTOR_ID = 0xe5f672fa
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, query_id: int, error: Optional[str]=None, shipping_options: Optional[List['TypeShippingOption']]=None):
"""
:returns Bool: This type has no constructors.
"""
self.query_id = query_id
self.error = error
self.shipping_options = shipping_options
def to_dict(self):
return {
'_': 'SetBotShippingResultsRequest',
'query_id': self.query_id,
'error': self.error,
'shipping_options': [] if self.shipping_options is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.shipping_options]
}
def _bytes(self):
return b''.join((
b'\xfar\xf6\xe5',
struct.pack('<I', (0 if self.error is None or self.error is False else 1) | (0 if self.shipping_options is None or self.shipping_options is False else 2)),
struct.pack('<q', self.query_id),
b'' if self.error is None or self.error is False else (self.serialize_bytes(self.error)),
b'' if self.shipping_options is None or self.shipping_options is False else b''.join((b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.shipping_options)),b''.join(x._bytes() for x in self.shipping_options))),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_query_id = reader.read_long()
if flags & 1:
_error = reader.tgread_string()
else:
_error = None
if flags & 2:
reader.read_int()
_shipping_options = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_shipping_options.append(_x)
else:
_shipping_options = None
return cls(query_id=_query_id, error=_error, shipping_options=_shipping_options)
class SetChatAvailableReactionsRequest(TLRequest):
CONSTRUCTOR_ID = 0xfeb16771
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, peer: 'TypeInputPeer', available_reactions: 'TypeChatReactions'):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.available_reactions = available_reactions
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'SetChatAvailableReactionsRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'available_reactions': self.available_reactions.to_dict() if isinstance(self.available_reactions, TLObject) else self.available_reactions
}
def _bytes(self):
return b''.join((
b'qg\xb1\xfe',
self.peer._bytes(),
self.available_reactions._bytes(),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_available_reactions = reader.tgread_object()
return cls(peer=_peer, available_reactions=_available_reactions)
class SetChatThemeRequest(TLRequest):
CONSTRUCTOR_ID = 0xe63be13f
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, peer: 'TypeInputPeer', emoticon: str):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.emoticon = emoticon
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'SetChatThemeRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'emoticon': self.emoticon
}
def _bytes(self):
return b''.join((
b'?\xe1;\xe6',
self.peer._bytes(),
self.serialize_bytes(self.emoticon),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_emoticon = reader.tgread_string()
return cls(peer=_peer, emoticon=_emoticon)
class SetChatWallPaperRequest(TLRequest):
CONSTRUCTOR_ID = 0x8ffacae1
SUBCLASS_OF_ID = 0x8af52aac
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', wallpaper: Optional['TypeInputWallPaper']=None, settings: Optional['TypeWallPaperSettings']=None, id: Optional[int]=None):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.wallpaper = wallpaper
self.settings = settings
self.id = id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'SetChatWallPaperRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'wallpaper': self.wallpaper.to_dict() if isinstance(self.wallpaper, TLObject) else self.wallpaper,
'settings': self.settings.to_dict() if isinstance(self.settings, TLObject) else self.settings,
'id': self.id
}
def _bytes(self):
return b''.join((
b'\xe1\xca\xfa\x8f',
struct.pack('<I', (0 if self.wallpaper is None or self.wallpaper is False else 1) | (0 if self.settings is None or self.settings is False else 4) | (0 if self.id is None or self.id is False else 2)),
self.peer._bytes(),
b'' if self.wallpaper is None or self.wallpaper is False else (self.wallpaper._bytes()),
b'' if self.settings is None or self.settings is False else (self.settings._bytes()),
b'' if self.id is None or self.id is False else (struct.pack('<i', self.id)),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_peer = reader.tgread_object()
if flags & 1:
_wallpaper = reader.tgread_object()
else:
_wallpaper = None
if flags & 4:
_settings = reader.tgread_object()
else:
_settings = None
if flags & 2:
_id = reader.read_int()
else:
_id = None
return cls(peer=_peer, wallpaper=_wallpaper, settings=_settings, id=_id)
class SetDefaultHistoryTTLRequest(TLRequest):
CONSTRUCTOR_ID = 0x9eb51445
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, period: int):
"""
:returns Bool: This type has no constructors.
"""
self.period = period
def to_dict(self):
return {
'_': 'SetDefaultHistoryTTLRequest',
'period': self.period
}
def _bytes(self):
return b''.join((
b'E\x14\xb5\x9e',
struct.pack('<i', self.period),
))
@classmethod
def from_reader(cls, reader):
_period = reader.read_int()
return cls(period=_period)
class SetDefaultReactionRequest(TLRequest):
CONSTRUCTOR_ID = 0x4f47a016
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, reaction: 'TypeReaction'):
"""
:returns Bool: This type has no constructors.
"""
self.reaction = reaction
def to_dict(self):
return {
'_': 'SetDefaultReactionRequest',
'reaction': self.reaction.to_dict() if isinstance(self.reaction, TLObject) else self.reaction
}
def _bytes(self):
return b''.join((
b'\x16\xa0GO',
self.reaction._bytes(),
))
@classmethod
def from_reader(cls, reader):
_reaction = reader.tgread_object()
return cls(reaction=_reaction)
class SetEncryptedTypingRequest(TLRequest):
CONSTRUCTOR_ID = 0x791451ed
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, peer: 'TypeInputEncryptedChat', typing: bool):
"""
:returns Bool: This type has no constructors.
"""
self.peer = peer
self.typing = typing
def to_dict(self):
return {
'_': 'SetEncryptedTypingRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'typing': self.typing
}
def _bytes(self):
return b''.join((
b'\xedQ\x14y',
self.peer._bytes(),
b'\xb5ur\x99' if self.typing else b'7\x97y\xbc',
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_typing = reader.tgread_bool()
return cls(peer=_peer, typing=_typing)
class SetGameScoreRequest(TLRequest):
CONSTRUCTOR_ID = 0x8ef8ecc0
SUBCLASS_OF_ID = 0x8af52aac
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', id: int, user_id: 'TypeInputUser', score: int, edit_message: Optional[bool]=None, force: Optional[bool]=None):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.id = id
self.user_id = user_id
self.score = score
self.edit_message = edit_message
self.force = force
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
self.user_id = utils.get_input_user(await client.get_input_entity(self.user_id))
def to_dict(self):
return {
'_': 'SetGameScoreRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'id': self.id,
'user_id': self.user_id.to_dict() if isinstance(self.user_id, TLObject) else self.user_id,
'score': self.score,
'edit_message': self.edit_message,
'force': self.force
}
def _bytes(self):
return b''.join((
b'\xc0\xec\xf8\x8e',
struct.pack('<I', (0 if self.edit_message is None or self.edit_message is False else 1) | (0 if self.force is None or self.force is False else 2)),
self.peer._bytes(),
struct.pack('<i', self.id),
self.user_id._bytes(),
struct.pack('<i', self.score),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_edit_message = bool(flags & 1)
_force = bool(flags & 2)
_peer = reader.tgread_object()
_id = reader.read_int()
_user_id = reader.tgread_object()
_score = reader.read_int()
return cls(peer=_peer, id=_id, user_id=_user_id, score=_score, edit_message=_edit_message, force=_force)
class SetHistoryTTLRequest(TLRequest):
CONSTRUCTOR_ID = 0xb80e5fe4
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, peer: 'TypeInputPeer', period: int):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.period = period
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'SetHistoryTTLRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'period': self.period
}
def _bytes(self):
return b''.join((
b'\xe4_\x0e\xb8',
self.peer._bytes(),
struct.pack('<i', self.period),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_period = reader.read_int()
return cls(peer=_peer, period=_period)
class SetInlineBotResultsRequest(TLRequest):
CONSTRUCTOR_ID = 0xbb12a419
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, query_id: int, results: List['TypeInputBotInlineResult'], cache_time: int, gallery: Optional[bool]=None, private: Optional[bool]=None, next_offset: Optional[str]=None, switch_pm: Optional['TypeInlineBotSwitchPM']=None, switch_webview: Optional['TypeInlineBotWebView']=None):
"""
:returns Bool: This type has no constructors.
"""
self.query_id = query_id
self.results = results
self.cache_time = cache_time
self.gallery = gallery
self.private = private
self.next_offset = next_offset
self.switch_pm = switch_pm
self.switch_webview = switch_webview
def to_dict(self):
return {
'_': 'SetInlineBotResultsRequest',
'query_id': self.query_id,
'results': [] if self.results is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.results],
'cache_time': self.cache_time,
'gallery': self.gallery,
'private': self.private,
'next_offset': self.next_offset,
'switch_pm': self.switch_pm.to_dict() if isinstance(self.switch_pm, TLObject) else self.switch_pm,
'switch_webview': self.switch_webview.to_dict() if isinstance(self.switch_webview, TLObject) else self.switch_webview
}
def _bytes(self):
return b''.join((
b'\x19\xa4\x12\xbb',
struct.pack('<I', (0 if self.gallery is None or self.gallery is False else 1) | (0 if self.private is None or self.private is False else 2) | (0 if self.next_offset is None or self.next_offset is False else 4) | (0 if self.switch_pm is None or self.switch_pm is False else 8) | (0 if self.switch_webview is None or self.switch_webview is False else 16)),
struct.pack('<q', self.query_id),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.results)),b''.join(x._bytes() for x in self.results),
struct.pack('<i', self.cache_time),
b'' if self.next_offset is None or self.next_offset is False else (self.serialize_bytes(self.next_offset)),
b'' if self.switch_pm is None or self.switch_pm is False else (self.switch_pm._bytes()),
b'' if self.switch_webview is None or self.switch_webview is False else (self.switch_webview._bytes()),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_gallery = bool(flags & 1)
_private = bool(flags & 2)
_query_id = reader.read_long()
reader.read_int()
_results = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_results.append(_x)
_cache_time = reader.read_int()
if flags & 4:
_next_offset = reader.tgread_string()
else:
_next_offset = None
if flags & 8:
_switch_pm = reader.tgread_object()
else:
_switch_pm = None
if flags & 16:
_switch_webview = reader.tgread_object()
else:
_switch_webview = None
return cls(query_id=_query_id, results=_results, cache_time=_cache_time, gallery=_gallery, private=_private, next_offset=_next_offset, switch_pm=_switch_pm, switch_webview=_switch_webview)
class SetInlineGameScoreRequest(TLRequest):
CONSTRUCTOR_ID = 0x15ad9f64
SUBCLASS_OF_ID = 0xf5b399ac
# noinspection PyShadowingBuiltins
def __init__(self, id: 'TypeInputBotInlineMessageID', user_id: 'TypeInputUser', score: int, edit_message: Optional[bool]=None, force: Optional[bool]=None):
"""
:returns Bool: This type has no constructors.
"""
self.id = id
self.user_id = user_id
self.score = score
self.edit_message = edit_message
self.force = force
async def resolve(self, client, utils):
self.user_id = utils.get_input_user(await client.get_input_entity(self.user_id))
def to_dict(self):
return {
'_': 'SetInlineGameScoreRequest',
'id': self.id.to_dict() if isinstance(self.id, TLObject) else self.id,
'user_id': self.user_id.to_dict() if isinstance(self.user_id, TLObject) else self.user_id,
'score': self.score,
'edit_message': self.edit_message,
'force': self.force
}
def _bytes(self):
return b''.join((
b'd\x9f\xad\x15',
struct.pack('<I', (0 if self.edit_message is None or self.edit_message is False else 1) | (0 if self.force is None or self.force is False else 2)),
self.id._bytes(),
self.user_id._bytes(),
struct.pack('<i', self.score),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_edit_message = bool(flags & 1)
_force = bool(flags & 2)
_id = reader.tgread_object()
_user_id = reader.tgread_object()
_score = reader.read_int()
return cls(id=_id, user_id=_user_id, score=_score, edit_message=_edit_message, force=_force)
class SetTypingRequest(TLRequest):
CONSTRUCTOR_ID = 0x58943ee2
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, peer: 'TypeInputPeer', action: 'TypeSendMessageAction', top_msg_id: Optional[int]=None):
"""
:returns Bool: This type has no constructors.
"""
self.peer = peer
self.action = action
self.top_msg_id = top_msg_id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'SetTypingRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'action': self.action.to_dict() if isinstance(self.action, TLObject) else self.action,
'top_msg_id': self.top_msg_id
}
def _bytes(self):
return b''.join((
b'\xe2>\x94X',
struct.pack('<I', (0 if self.top_msg_id is None or self.top_msg_id is False else 1)),
self.peer._bytes(),
b'' if self.top_msg_id is None or self.top_msg_id is False else (struct.pack('<i', self.top_msg_id)),
self.action._bytes(),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_peer = reader.tgread_object()
if flags & 1:
_top_msg_id = reader.read_int()
else:
_top_msg_id = None
_action = reader.tgread_object()
return cls(peer=_peer, action=_action, top_msg_id=_top_msg_id)
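# Usage sketch (not part of the generated schema; SendMessageTypingAction is
# assumed to come from the generated types module):
#
#     await client(SetTypingRequest(
#         peer='username',
#         action=SendMessageTypingAction(),
#     ))
#
# top_msg_id targets a specific forum topic and is only serialized when set.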
class StartBotRequest(TLRequest):
CONSTRUCTOR_ID = 0xe6df7378
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, bot: 'TypeInputUser', peer: 'TypeInputPeer', start_param: str, random_id: int=None):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.bot = bot
self.peer = peer
self.start_param = start_param
self.random_id = random_id if random_id is not None else int.from_bytes(os.urandom(8), 'big', signed=True)
async def resolve(self, client, utils):
self.bot = utils.get_input_user(await client.get_input_entity(self.bot))
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'StartBotRequest',
'bot': self.bot.to_dict() if isinstance(self.bot, TLObject) else self.bot,
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'start_param': self.start_param,
'random_id': self.random_id
}
def _bytes(self):
return b''.join((
b'xs\xdf\xe6',
self.bot._bytes(),
self.peer._bytes(),
struct.pack('<q', self.random_id),
self.serialize_bytes(self.start_param),
))
@classmethod
def from_reader(cls, reader):
_bot = reader.tgread_object()
_peer = reader.tgread_object()
_random_id = reader.read_long()
_start_param = reader.tgread_string()
return cls(bot=_bot, peer=_peer, start_param=_start_param, random_id=_random_id)
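# Usage sketch (not part of the generated schema): start_param is the payload
# that appears after "?start=" in a bot deep link.
#
#     await client(StartBotRequest(
#         bot='SomeBot',
#         peer='SomeBot',
#         start_param='referral-code',
#     ))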
class StartHistoryImportRequest(TLRequest):
CONSTRUCTOR_ID = 0xb43df344
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, peer: 'TypeInputPeer', import_id: int):
"""
:returns Bool: This type has no constructors.
"""
self.peer = peer
self.import_id = import_id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'StartHistoryImportRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'import_id': self.import_id
}
def _bytes(self):
return b''.join((
b'D\xf3=\xb4',
self.peer._bytes(),
struct.pack('<q', self.import_id),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_import_id = reader.read_long()
return cls(peer=_peer, import_id=_import_id)
class ToggleBotInAttachMenuRequest(TLRequest):
CONSTRUCTOR_ID = 0x69f59d69
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, bot: 'TypeInputUser', enabled: bool, write_allowed: Optional[bool]=None):
"""
:returns Bool: This type has no constructors.
"""
self.bot = bot
self.enabled = enabled
self.write_allowed = write_allowed
async def resolve(self, client, utils):
self.bot = utils.get_input_user(await client.get_input_entity(self.bot))
def to_dict(self):
return {
'_': 'ToggleBotInAttachMenuRequest',
'bot': self.bot.to_dict() if isinstance(self.bot, TLObject) else self.bot,
'enabled': self.enabled,
'write_allowed': self.write_allowed
}
def _bytes(self):
return b''.join((
b'i\x9d\xf5i',
struct.pack('<I', (0 if self.write_allowed is None or self.write_allowed is False else 1)),
self.bot._bytes(),
b'\xb5ur\x99' if self.enabled else b'7\x97y\xbc',
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_write_allowed = bool(flags & 1)
_bot = reader.tgread_object()
_enabled = reader.tgread_bool()
return cls(bot=_bot, enabled=_enabled, write_allowed=_write_allowed)
class ToggleDialogPinRequest(TLRequest):
CONSTRUCTOR_ID = 0xa731e257
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, peer: 'TypeInputDialogPeer', pinned: Optional[bool]=None):
"""
:returns Bool: This type has no constructors.
"""
self.peer = peer
self.pinned = pinned
async def resolve(self, client, utils):
self.peer = await client._get_input_dialog(self.peer)
def to_dict(self):
return {
'_': 'ToggleDialogPinRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'pinned': self.pinned
}
def _bytes(self):
return b''.join((
b'W\xe21\xa7',
struct.pack('<I', (0 if self.pinned is None or self.pinned is False else 1)),
self.peer._bytes(),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_pinned = bool(flags & 1)
_peer = reader.tgread_object()
return cls(peer=_peer, pinned=_pinned)
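# Usage sketch (not part of the generated schema): resolve() converts the peer
# into an InputDialogPeer through the client's dialog helpers, so a username or
# entity can be passed directly.
#
#     await client(ToggleDialogPinRequest(
#         peer='username',
#         pinned=True,
#     ))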
class ToggleNoForwardsRequest(TLRequest):
CONSTRUCTOR_ID = 0xb11eafa2
SUBCLASS_OF_ID = 0x8af52aac
def __init__(self, peer: 'TypeInputPeer', enabled: bool):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.enabled = enabled
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'ToggleNoForwardsRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'enabled': self.enabled
}
def _bytes(self):
return b''.join((
b'\xa2\xaf\x1e\xb1',
self.peer._bytes(),
b'\xb5ur\x99' if self.enabled else b'7\x97y\xbc',
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_enabled = reader.tgread_bool()
return cls(peer=_peer, enabled=_enabled)
class TogglePeerTranslationsRequest(TLRequest):
CONSTRUCTOR_ID = 0xe47cb579
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, peer: 'TypeInputPeer', disabled: Optional[bool]=None):
"""
:returns Bool: This type has no constructors.
"""
self.peer = peer
self.disabled = disabled
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'TogglePeerTranslationsRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'disabled': self.disabled
}
def _bytes(self):
return b''.join((
b'y\xb5|\xe4',
struct.pack('<I', (0 if self.disabled is None or self.disabled is False else 1)),
self.peer._bytes(),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_disabled = bool(flags & 1)
_peer = reader.tgread_object()
return cls(peer=_peer, disabled=_disabled)
class ToggleStickerSetsRequest(TLRequest):
CONSTRUCTOR_ID = 0xb5052fea
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, stickersets: List['TypeInputStickerSet'], uninstall: Optional[bool]=None, archive: Optional[bool]=None, unarchive: Optional[bool]=None):
"""
:returns Bool: This type has no constructors.
"""
self.stickersets = stickersets
self.uninstall = uninstall
self.archive = archive
self.unarchive = unarchive
def to_dict(self):
return {
'_': 'ToggleStickerSetsRequest',
'stickersets': [] if self.stickersets is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.stickersets],
'uninstall': self.uninstall,
'archive': self.archive,
'unarchive': self.unarchive
}
def _bytes(self):
return b''.join((
b'\xea/\x05\xb5',
struct.pack('<I', (0 if self.uninstall is None or self.uninstall is False else 1) | (0 if self.archive is None or self.archive is False else 2) | (0 if self.unarchive is None or self.unarchive is False else 4)),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.stickersets)),b''.join(x._bytes() for x in self.stickersets),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_uninstall = bool(flags & 1)
_archive = bool(flags & 2)
_unarchive = bool(flags & 4)
reader.read_int()
_stickersets = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_stickersets.append(_x)
return cls(stickersets=_stickersets, uninstall=_uninstall, archive=_archive, unarchive=_unarchive)
class TranscribeAudioRequest(TLRequest):
CONSTRUCTOR_ID = 0x269e9a49
SUBCLASS_OF_ID = 0x21b24936
def __init__(self, peer: 'TypeInputPeer', msg_id: int):
"""
:returns messages.TranscribedAudio: Instance of TranscribedAudio.
"""
self.peer = peer
self.msg_id = msg_id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'TranscribeAudioRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'msg_id': self.msg_id
}
def _bytes(self):
return b''.join((
b'I\x9a\x9e&',
self.peer._bytes(),
struct.pack('<i', self.msg_id),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_msg_id = reader.read_int()
return cls(peer=_peer, msg_id=_msg_id)
class TranslateTextRequest(TLRequest):
CONSTRUCTOR_ID = 0x63183030
SUBCLASS_OF_ID = 0x24243e8
# noinspection PyShadowingBuiltins
def __init__(self, to_lang: str, peer: Optional['TypeInputPeer']=None, id: Optional[List[int]]=None, text: Optional[List['TypeTextWithEntities']]=None):
"""
:returns messages.TranslatedText: Instance of TranslateResult.
"""
self.to_lang = to_lang
self.peer = peer
self.id = id
self.text = text
async def resolve(self, client, utils):
if self.peer:
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'TranslateTextRequest',
'to_lang': self.to_lang,
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'id': [] if self.id is None else self.id[:],
'text': [] if self.text is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.text]
}
def _bytes(self):
        assert ((self.peer or self.peer is not None) and (self.id or self.id is not None)) or ((self.peer is None or self.peer is False) and (self.id is None or self.id is False)), 'peer, id parameters must all be False-y (like None) or all be True-y'
return b''.join((
b'00\x18c',
struct.pack('<I', (0 if self.peer is None or self.peer is False else 1) | (0 if self.id is None or self.id is False else 1) | (0 if self.text is None or self.text is False else 2)),
b'' if self.peer is None or self.peer is False else (self.peer._bytes()),
b'' if self.id is None or self.id is False else b''.join((b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<i', x) for x in self.id))),
b'' if self.text is None or self.text is False else b''.join((b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.text)),b''.join(x._bytes() for x in self.text))),
self.serialize_bytes(self.to_lang),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
if flags & 1:
_peer = reader.tgread_object()
else:
_peer = None
if flags & 1:
reader.read_int()
_id = []
for _ in range(reader.read_int()):
_x = reader.read_int()
_id.append(_x)
else:
_id = None
if flags & 2:
reader.read_int()
_text = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_text.append(_x)
else:
_text = None
_to_lang = reader.tgread_string()
return cls(to_lang=_to_lang, peer=_peer, id=_id, text=_text)
class UninstallStickerSetRequest(TLRequest):
CONSTRUCTOR_ID = 0xf96e55de
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, stickerset: 'TypeInputStickerSet'):
"""
:returns Bool: This type has no constructors.
"""
self.stickerset = stickerset
def to_dict(self):
return {
'_': 'UninstallStickerSetRequest',
'stickerset': self.stickerset.to_dict() if isinstance(self.stickerset, TLObject) else self.stickerset
}
def _bytes(self):
return b''.join((
b'\xdeUn\xf9',
self.stickerset._bytes(),
))
@classmethod
def from_reader(cls, reader):
_stickerset = reader.tgread_object()
return cls(stickerset=_stickerset)
class UnpinAllMessagesRequest(TLRequest):
CONSTRUCTOR_ID = 0xee22b9a8
SUBCLASS_OF_ID = 0x2c49c116
def __init__(self, peer: 'TypeInputPeer', top_msg_id: Optional[int]=None):
"""
:returns messages.AffectedHistory: Instance of AffectedHistory.
"""
self.peer = peer
self.top_msg_id = top_msg_id
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'UnpinAllMessagesRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'top_msg_id': self.top_msg_id
}
def _bytes(self):
return b''.join((
b'\xa8\xb9"\xee',
struct.pack('<I', (0 if self.top_msg_id is None or self.top_msg_id is False else 1)),
self.peer._bytes(),
b'' if self.top_msg_id is None or self.top_msg_id is False else (struct.pack('<i', self.top_msg_id)),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_peer = reader.tgread_object()
if flags & 1:
_top_msg_id = reader.read_int()
else:
_top_msg_id = None
return cls(peer=_peer, top_msg_id=_top_msg_id)
class UpdateDialogFilterRequest(TLRequest):
CONSTRUCTOR_ID = 0x1ad4a04a
SUBCLASS_OF_ID = 0xf5b399ac
# noinspection PyShadowingBuiltins
def __init__(self, id: int, filter: Optional['TypeDialogFilter']=None):
"""
:returns Bool: This type has no constructors.
"""
self.id = id
self.filter = filter
def to_dict(self):
return {
'_': 'UpdateDialogFilterRequest',
'id': self.id,
'filter': self.filter.to_dict() if isinstance(self.filter, TLObject) else self.filter
}
def _bytes(self):
return b''.join((
b'J\xa0\xd4\x1a',
struct.pack('<I', (0 if self.filter is None or self.filter is False else 1)),
struct.pack('<i', self.id),
b'' if self.filter is None or self.filter is False else (self.filter._bytes()),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_id = reader.read_int()
if flags & 1:
_filter = reader.tgread_object()
else:
_filter = None
return cls(id=_id, filter=_filter)
class UpdateDialogFiltersOrderRequest(TLRequest):
CONSTRUCTOR_ID = 0xc563c1e4
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, order: List[int]):
"""
:returns Bool: This type has no constructors.
"""
self.order = order
def to_dict(self):
return {
'_': 'UpdateDialogFiltersOrderRequest',
'order': [] if self.order is None else self.order[:]
}
def _bytes(self):
return b''.join((
b'\xe4\xc1c\xc5',
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.order)),b''.join(struct.pack('<i', x) for x in self.order),
))
@classmethod
def from_reader(cls, reader):
reader.read_int()
_order = []
for _ in range(reader.read_int()):
_x = reader.read_int()
_order.append(_x)
return cls(order=_order)
class UpdatePinnedMessageRequest(TLRequest):
CONSTRUCTOR_ID = 0xd2aaf7ec
SUBCLASS_OF_ID = 0x8af52aac
# noinspection PyShadowingBuiltins
def __init__(self, peer: 'TypeInputPeer', id: int, silent: Optional[bool]=None, unpin: Optional[bool]=None, pm_oneside: Optional[bool]=None):
"""
:returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
"""
self.peer = peer
self.id = id
self.silent = silent
self.unpin = unpin
self.pm_oneside = pm_oneside
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
def to_dict(self):
return {
'_': 'UpdatePinnedMessageRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'id': self.id,
'silent': self.silent,
'unpin': self.unpin,
'pm_oneside': self.pm_oneside
}
def _bytes(self):
return b''.join((
b'\xec\xf7\xaa\xd2',
struct.pack('<I', (0 if self.silent is None or self.silent is False else 1) | (0 if self.unpin is None or self.unpin is False else 2) | (0 if self.pm_oneside is None or self.pm_oneside is False else 4)),
self.peer._bytes(),
struct.pack('<i', self.id),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_silent = bool(flags & 1)
_unpin = bool(flags & 2)
_pm_oneside = bool(flags & 4)
_peer = reader.tgread_object()
_id = reader.read_int()
return cls(peer=_peer, id=_id, silent=_silent, unpin=_unpin, pm_oneside=_pm_oneside)
class UploadEncryptedFileRequest(TLRequest):
CONSTRUCTOR_ID = 0x5057c497
SUBCLASS_OF_ID = 0x842a67c0
def __init__(self, peer: 'TypeInputEncryptedChat', file: 'TypeInputEncryptedFile'):
"""
:returns EncryptedFile: Instance of either EncryptedFileEmpty, EncryptedFile.
"""
self.peer = peer
self.file = file
def to_dict(self):
return {
'_': 'UploadEncryptedFileRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'file': self.file.to_dict() if isinstance(self.file, TLObject) else self.file
}
def _bytes(self):
return b''.join((
b'\x97\xc4WP',
self.peer._bytes(),
self.file._bytes(),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_file = reader.tgread_object()
return cls(peer=_peer, file=_file)
class UploadImportedMediaRequest(TLRequest):
CONSTRUCTOR_ID = 0x2a862092
SUBCLASS_OF_ID = 0x476cbe32
def __init__(self, peer: 'TypeInputPeer', import_id: int, file_name: str, media: 'TypeInputMedia'):
"""
:returns MessageMedia: Instance of either MessageMediaEmpty, MessageMediaPhoto, MessageMediaGeo, MessageMediaContact, MessageMediaUnsupported, MessageMediaDocument, MessageMediaWebPage, MessageMediaVenue, MessageMediaGame, MessageMediaInvoice, MessageMediaGeoLive, MessageMediaPoll, MessageMediaDice.
"""
self.peer = peer
self.import_id = import_id
self.file_name = file_name
self.media = media
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
self.media = utils.get_input_media(self.media)
def to_dict(self):
return {
'_': 'UploadImportedMediaRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'import_id': self.import_id,
'file_name': self.file_name,
'media': self.media.to_dict() if isinstance(self.media, TLObject) else self.media
}
def _bytes(self):
return b''.join((
b'\x92 \x86*',
self.peer._bytes(),
struct.pack('<q', self.import_id),
self.serialize_bytes(self.file_name),
self.media._bytes(),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_import_id = reader.read_long()
_file_name = reader.tgread_string()
_media = reader.tgread_object()
return cls(peer=_peer, import_id=_import_id, file_name=_file_name, media=_media)
class UploadMediaRequest(TLRequest):
CONSTRUCTOR_ID = 0x519bc2b1
SUBCLASS_OF_ID = 0x476cbe32
def __init__(self, peer: 'TypeInputPeer', media: 'TypeInputMedia'):
"""
:returns MessageMedia: Instance of either MessageMediaEmpty, MessageMediaPhoto, MessageMediaGeo, MessageMediaContact, MessageMediaUnsupported, MessageMediaDocument, MessageMediaWebPage, MessageMediaVenue, MessageMediaGame, MessageMediaInvoice, MessageMediaGeoLive, MessageMediaPoll, MessageMediaDice.
"""
self.peer = peer
self.media = media
async def resolve(self, client, utils):
self.peer = utils.get_input_peer(await client.get_input_entity(self.peer))
self.media = utils.get_input_media(self.media)
def to_dict(self):
return {
'_': 'UploadMediaRequest',
'peer': self.peer.to_dict() if isinstance(self.peer, TLObject) else self.peer,
'media': self.media.to_dict() if isinstance(self.media, TLObject) else self.media
}
def _bytes(self):
return b''.join((
b'\xb1\xc2\x9bQ',
self.peer._bytes(),
self.media._bytes(),
))
@classmethod
def from_reader(cls, reader):
_peer = reader.tgread_object()
_media = reader.tgread_object()
return cls(peer=_peer, media=_media) | PypiClean |
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/jax/output/HTML-CSS/fonts/Gyre-Termes/Marks/Regular/Main.js | MathJax.OutputJax["HTML-CSS"].FONTDATA.FONTS.GyreTermesMathJax_Marks={directory:"Marks/Regular",family:"GyreTermesMathJax_Marks",testString:"\u00A0\u02DB\u02DD\u0305\u0309\u030F\u0311\u0323\u0326\u032C\u032D\u032E\u032F\u0330\u0331",32:[0,0,250,0,0],160:[0,0,250,0,0],731:[17,245,333,81,261],733:[676,-505,333,37,427],773:[632,-588,0,-416,-83],777:[704,-517,0,-348,-151],783:[711,-540,0,-510,-120],785:[692,-567,0,-425,-75],803:[-89,191,0,-301,-199],806:[-38,281,0,-319,-180],812:[-70,204,0,-421,-79],813:[-80,214,0,-421,-79],814:[-70,195,0,-425,-75],815:[-88,213,0,-425,-75],816:[-88,197,0,-417,-83],817:[-113,167,0,-405,-94],818:[-70,114,0,-416,-83],819:[-70,228,0,-416,-83],831:[746,-588,0,-416,-83],8192:[0,0,500,0,0],8193:[0,0,1000,0,0],8199:[0,0,500,0,0],8200:[0,0,250,0,0],8203:[0,0,0,0,0],8204:[0,0,0,0,0],8205:[0,0,0,0,0],8208:[257,-194,333,39,285],8210:[357,-305,660,80,580],8213:[276,-224,1160,80,1080],8215:[-70,228,493,80,413],8218:[102,141,333,79,218],8222:[102,141,444,45,416],8226:[400,-100,460,80,380],8239:[0,0,200,0,0],8240:[676,13,1000,14,986],8241:[676,13,1320,14,1306],8246:[780,-450,521,60,461],8247:[780,-450,721,60,661],8249:[411,-33,333,57,278],8250:[411,-33,333,45,266],8251:[514,14,564,18,546],8253:[736,8,444,68,414],8274:[662,0,500,28,472],8287:[0,0,222,0,0],8288:[0,0,0,0,0],8289:[702,202,1008,52,956],8290:[0,0,0,0,0],8291:[0,0,0,0,0],8292:[0,0,0,0,0],8400:[710,-600,0,-438,-62],8401:[710,-600,0,-438,-62],8402:[650,150,0,-272,-228],8403:[500,0,0,-276,-224],8404:[768,-599,0,-452,-48],8405:[768,-599,0,-452,-48],8406:[710,-534,0,-443,-57],8408:[400,-100,0,-400,-100],8411:[660,-560,0,-500,0],8412:[660,-560,0,-600,100],8413:[668,168,0,-668,168],8414:[650,150,0,-650,150],8415:[872,372,0,-872,372],8417:[710,-534,0,-479,-21],8420:[735,209,0,-795,295],8421:[650,150,0,-403,-97],8422:[650,150,0,-344,-156],8424:[-70,170,0,-500,0],8425:[726,-548,0,-438,-63],8426:[430,-70,0,-595,95],8427:[650,150,0,-479,-21],8428:[-150,260,0,-438,-62],8429:[-150,260,0,-438,-62],8430:[-84,260,0,-443,-57],8431:[-84,260,0,-443,-57],8432:[747,-509,0,-356,-143],11800:[503,241,444,30,376],12310:[668,168,430,80,350],12311:[668,168,430,80,350]};MathJax.Callback.Queue(["initFont",MathJax.OutputJax["HTML-CSS"],"GyreTermesMathJax_Marks"],["loadComplete",MathJax.Ajax,MathJax.OutputJax["HTML-CSS"].fontDir+"/Marks/Regular/Main.js"]); | PypiClean |
/BCPy2000-1.6.tar.gz/BCPy2000-1.6/src/SigTools/LearningTools.py | __all__ = [
'binomial', 'seqste',
'all_pairs', 'one_versus_rest', 'infer_classes',
'logistic', 'invlogistic',
'cg', 'invcg',
'dprime', 'running_mean', 'running_cov',
'svd', 'lda', 'csp', 'csp_itfe',
'confuse', 'balanced_loss', 'class_loss', 'eeop',
'linkern', 'sqdist', 'rbfkern', 'guesswidth', 'kview',
'predictor', 'klr2class', 'lda2class',
'demodata',
'plotopt',
'foldguide', 'experiment',
'overlapping',
'spcov', 'shrinkcov', 'spfilt', 'symwhiten', 'symwhitenkern',
'stfac', 'stfac_filters_and_patterns',
'correlate', 'correlation_pvalue',
]
import numpy
import copy
from .NumTools import asmatrix, mad, loadmat, savemat, project, isequal
from functools import reduce
__all__ += ['asmatrix', 'mad', 'loadmat', 'savemat', 'project', 'isequal']
from .NumTools import summarize, sdict, reportstruct, sstruct
__all__ += ['summarize', 'sdict', 'reportstruct', 'sstruct', ]
def binomial(x, axis=None):
"""
Given a sequence or array of booleans <x>,
Return a dict containing
'mean': proportion of True values,
'ste': standard error of the mean,
'n': number of observations,
These dicts can be combined, to provide
exact incremental standard error estimates,
using seqste()
"""###
x = numpy.asarray(x, dtype=numpy.float64)
if axis == None: n = x.size
else: n = x.shape[axis]
m = x.mean(axis=axis)
v = (m * (1-m))/(n-1)
e = v ** 0.5
return {'mean':m,'ste':e,'n':int(n)}
def test_seqste(p=0.9, n=184):
x = numpy.random.rand(n) < p
b = binomial(x)
n = int(b['n']/3)
b1 = binomial(x[:n])
b2 = binomial(x[n:n*2])
b3 = binomial(x[n*2:])
print(b, "(ground truth)")
print(seqste(b1,b2,b3))
print(seqste(b1,b3,b2))
print(seqste(b2,b1,b3))
print(seqste(b2,b3,b1))
print(seqste(b3,b1,b2))
print(seqste(b3,b2,b1))
def seqste(d1, *more):
"""
d1 and d2 both dicts with entries 'mean', 'ste' and 'n'.
Return a similar dict with a combined estimate of the mean and standard-error-of-mean.
Baker, R.W.R & Nissim, J.A. (1963):
Expressions for Combining Standard Errors of Two Groups and for Sequential Standard Error
Nature 198, 1020; doi:10.1038/1981020a0
http://www.nature.com/nature/journal/v198/n4884/abs/1981020a0.html
"""###
if len(more) == 0: return d1
d2 = more[0]
keys = ['mean','ste','n']
if sorted(d1.keys()) != sorted(keys) or sorted(d2.keys()) != sorted(keys):
raise ValueError('data inputs should be dicts with fields %s' % ','.join(keys))
def conv(x):
if isinstance(x, numpy.ndarray): x = numpy.asarray(x, dtype=numpy.float64)
if isinstance(x, (int,bool)): x = float(x)
return x
m1,e1,n1 = [conv(d1[k]) for k in keys]
m2,e2,n2 = [conv(d2[k]) for k in keys]
n3 = n1 + n2
v3 = n1*(n1-1)*e1**2 + n2*(n2-1)*e2**2 + n1*n2*(m1-m2)**2/n3
v3 /= n3 * (n3-1)
e3 = v3 ** 0.5
m3 = (m1*n1 + m2*n2) / n3
result = {'mean':m3,'ste':e3,'n':int(n3)}
return seqste(result, *more[1:])
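# Usage sketch (hypothetical data, not part of the original module): binomial()
# summaries computed on separate blocks of trials can be pooled with seqste(),
# giving the same mean and standard error as binomial() applied to all trials at once:
#     b1 = binomial([True, True, False, True])    # first block, 4 trials
#     b2 = binomial([True, False, True])          # second block, 3 trials
#     pooled = seqste(b1, b2)                     # {'mean': ..., 'ste': ..., 'n': 7}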
def all_pairs(classes):
"""
for neg,pos in all_pairs(5):
print neg,"versus",pos
1 versus 2
1 versus 3
2 versus 3
1 versus 4
2 versus 4
3 versus 4
1 versus 5
2 versus 5
3 versus 5
4 versus 5
You get the idea. The input may alternatively be a sequence of class
identifiers.
"""###
if isinstance(classes, (float,int)): classes = list(range(1,int(classes)+1))
classes = sorted(tuple(set(classes)))
pairs = []
for x in range(len(classes)):
for y in range(x):
pairs.append((classes[y], classes[x]))
return tuple(pairs)
def one_versus_rest(classes):
"""
for neg,pos in one_versus_rest(5):
print neg,"versus",pos
(2, 3, 4, 5) versus 1
(1, 3, 4, 5) versus 2
(1, 2, 4, 5) versus 3
(1, 2, 3, 5) versus 4
(1, 2, 3, 4) versus 5
You get the idea. The convention is for the "rest" to come out first
(to be labelled as -1 in the binary sub-problem) and the "one" to come
out second (to be labelled as +1).
The input may alternatively be a sequence of class identifiers.
"""###
if isinstance(classes, (float,int)): classes = list(range(1,int(classes)+1))
classes = set(classes)
standardize = lambda x: tuple(sorted(tuple(x))) # because python sorts set((-1,1)) the wrong way round for some inexplicable reason
if len(classes) == 2: return (standardize(classes),)
rest = lambda x: standardize(classes.difference((x,)))
pairs = [(rest(one),one) for one in standardize(classes)]
return tuple(pairs)
def infer_classes(cc, types=None):
"""
classes,data = infer_classes(data)
If <data> is a 2-element sequence, assign the classes (-1, +1).
Otherwise, if <data> is an n-element sequence, return the classes
1 through n.
If data is a dictionary, the classes will be inferred from the keys,
and the classes and data delivered in a standardized order. If all the
keys are scalar, they will simply be returned sorted in ascending order.
If any are sequences, then all the keys will be made into sequences,
sorted within themselves and then delivered in an order that is sorted
first by decreasing length, then by value. So, for example, the input
{3:'one', (2,1): 'rest'}
will yield classes=( (1,2) , (3,) ) and data=( 'rest' , 'one' ).
The sorting-by-decreasing-length ensures that, in a one-versus-rest
problem, the "rest" will always come out first (to be mapped to -1) and
the "one" will come out second (to be mapped to +1).
"""###
if len(cc) == 1 and isinstance(cc[0], (list,tuple,set,dict)): cc = cc[0]
if len(cc) == 2: classes = (-1, +1)
else: classes = tuple(range(1,len(cc)+1))
isseq = lambda x: isinstance(x,(tuple,list,set))
tuplify = lambda x: isseq(x) and tuple(x) or (x,)
if isinstance(cc, dict):
k = list(cc.keys())
v = list(cc.values())
if True in list(map(isseq, k)):
k = [tuple(set(tuplify(x))) for x in k]
			keylenkey = lambda x: (-len(x[0]),) + x[0]  # sort by decreasing key length, then by value
			classes, cc = list(zip(*sorted(zip(k, v), key=keylenkey)))
else:
classes, cc = list(zip(*sorted(cc.items())))
if isinstance(types, list): types = tuple(types)
if types != None and False in [isinstance(c,types) for c in cc]:
raise TypeError('inputs must be of one of the following types: '+repr(types))
return classes, cc
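# Runnable restatement of the docstring example above (no new behaviour assumed):
#     classes, data = infer_classes({3: 'one', (2, 1): 'rest'})
#     # classes == ((1, 2), (3,)) and data == ('rest', 'one'), so in a
#     # one-versus-rest problem the "rest" group maps to -1 and the "one" to +1.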
def logistic(x, deriv=0):
f = 1.0 / (1.0 + numpy.exp(-x))
if deriv == 0: return f
elif deriv == 1: return f * (1.0 - f)
else: raise ValueError("derivative %s not defined" % str(deriv))
def invlogistic(p):
return -numpy.log(1.0/p - 1.0)
def cg(x, deriv=0):
import scipy.special
if deriv == 0: return 0.5 * scipy.special.erfc(-x / 2.0 ** 0.5)
elif deriv == 1: return numpy.exp(-0.5 * x ** 2.0) * (numpy.pi * 2) ** -0.5
else: raise ValueError("derivative %s not defined" % str(deriv))
def invcg(p):
import scipy.special
return 2.0 ** 0.5 * scipy.special.erfinv(2.0 * p - 1.0)
def dprime(*cc, **kwargs):
"""
Compute the dprime value (signed square root of the Fisher score)
between two running_mean or running_cov objects which correspond
to two different classes.
"""###
if len(cc):
if len(kwargs): raise TypeError("supply either all-unnamed args or all-named")
classes,cc = infer_classes(cc, [running_mean,running_cov])
if len(cc) != 2: raise TypeError('expected two inputs')
return (cc[1].m - cc[0].m) / numpy.sqrt(cc[1].v + cc[0].v)
if len(kwargs):
if len(cc): raise TypeError("supply either all-unnamed args or all-named")
x = kwargs.pop('x')
y = kwargs.pop('y')
exemplar_dim = kwargs.pop('exemplar_dim', 0)
if len(kwargs): raise TypeError("unexpected keyword argument %s" % list(kwargs.keys())[0])
sub = [slice(None) for d in x.shape]
sub[exemplar_dim] = [i for i,yi in enumerate(y.flat) if yi > 0.0]
xsub = x[sub]
mpos = xsub.mean(axis=exemplar_dim)
vpos = xsub.var(axis=exemplar_dim)
sub[exemplar_dim] = [i for i,yi in enumerate(y.flat) if yi < 0.0]
xsub = x[sub]
mneg = xsub.mean(axis=exemplar_dim)
vneg = xsub.var(axis=exemplar_dim)
return (mpos - mneg) / numpy.sqrt(vpos + vneg)
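# Usage sketch (hypothetical exemplars x and labels y in {-1,+1}): accumulate one
# running_mean per class and ask for the per-feature sensitivity index between them:
#     rneg, rpos = running_mean(), running_mean()
#     for xi, yi in zip(x, y):
#         (rpos if yi > 0 else rneg).update(xi)
#     d = dprime(rneg, rpos)
#     # or, equivalently, straight from the packed array: dprime(x=x, y=y, exemplar_dim=0)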
class running_mean(object):
"""
An object that keeps track of the mean and variance of a series
of values x presented online. Each x may be a scalar value or a
numpy.array.
Exemplars are added to object r by the += operator:
r = running_mean()
r += x
The object r has the following properties:
r.n : number of samples so far
r.m : mean of x so far (same shape as incoming x)
r.v : variance of x so far, normalized by r.n (same
shape as incoming x)
r.v_unbiased : a virtual attribute which returns the variance
normalized by (r.n - 1.0) instead of by r.n
If r is created with fullcov=True, then elements x are flattened
as they are added (so, for one thing, the mean and variance will
be flat arrays with length equal to the number of elements of x)
and a full covariance matrix is also computed, and is accessible
using the properties r.C and r.C_unbiased (analogous to r.v and
r.v_unbiased).
If r.persistence=1.0, then all previous samples are "remembered"
and each incoming exemplar counts as r.increment number of new
samples (the increment may be measured in any units you like -
seconds, for example). If r.persistence < 1.0, then an
exponential forgetting factor of 1.0-r.persistence is used, and
although self.increment is added to s.n, s.n does not fully
reflect the number of degrees of freedom in the estimation, which
is roughly equal to 1/(1.0-r.persistence)
The reset() method zeroes everything.
"""###
def __init__(self, persistence=1.0, increment=1.0, fullcov=False):
"""
The persistence and increment arguments initialize the
self.persistence and self.increment attributes.
"""###
self.increment = float(increment)
self.persistence = float(persistence)
self.fullcov = fullcov
self.reset()
def reset(self):
self.sumx2 = 0.0
self.sumx1 = 0.0
self.denom = 0.0
self.n = 0.0
def update(self, x, increment=None):
if increment == None: increment = self.increment
persistence = self.persistence
if self.n == 0.0: persistence = 0.0
if self.fullcov:
x1 = numpy.asarray(x).flatten()
xM = numpy.asmatrix(x1)
x2 = xM.H * xM
else:
x1 = x
x2 = numpy.multiply(x, numpy.conj(x))
self.sumx2 = persistence * self.sumx2 + x2
self.sumx1 = persistence * self.sumx1 + x1
self.denom = persistence * self.denom + increment
self.n += increment
def get_mean(self):
if self.denom == 0.0: return numpy.nan
return self.sumx1 / self.denom
def get_variance_biased(self, return_fullcov=False):
if self.denom == 0.0: return numpy.nan
if return_fullcov and not self.fullcov:
raise ValueError("full covariance matrices are not available from this object")
if self.fullcov and not return_fullcov:
mean_xsquared = self.sumx2.diagonal().A.flatten() / self.denom
else:
mean_xsquared = self.sumx2 / self.denom
meanx = self.sumx1 / self.denom
if self.fullcov and return_fullcov:
meanx = numpy.asmatrix(meanx)
squared_meanx = meanx.H * meanx
else:
squared_meanx = numpy.multiply(meanx, numpy.conj(meanx))
return mean_xsquared - squared_meanx
def get_variance_unbiased(self, return_fullcov=False):
if self.denom <= self.increment: return numpy.nan
return self.get_variance_biased(return_fullcov=return_fullcov) * (self.denom / (self.denom - self.increment))
	@property
	def m(self):
		"running mean estimate"
		return self.get_mean()
	@property
	def v(self):
		"running variance estimate normalized by n (see also v_unbiased)"
		return self.get_variance_biased(return_fullcov=False)
	@property
	def v_unbiased(self):
		"running variance estimate normalized by n-1 (see also v)"
		return self.get_variance_unbiased(return_fullcov=False)
	@property
	def C(self):
		"running covariance estimate normalized by n (see also C_unbiased)"
		return self.get_variance_biased(return_fullcov=True)
	@property
	def C_unbiased(self):
		"running covariance estimate normalized by n-1 (see also C)"
		return self.get_variance_unbiased(return_fullcov=True)
def __iadd__(self, x):
self.update(x)
return self
def run(self, x, axis=-1, reset=False):
"""
Test the running_mean object by adding <x> one sample at a time,
where samples are slices concatenated along the specified <axis>.
If <reset> is passed as True, the object is reset first.
running_mean().run(x, axis=0).m
# should be the same as numpy.mean(x, axis=0)
running_mean().run(x, axis=0).v
# should be the same as numpy.var(x, axis=0)
running_mean(fullcov=True).run(x, axis=0).C_unbiased
		# should be the same as numpy.cov(x, rowvar=False)
"""###
if reset: self.reset()
x = numpy.array(x, copy=False)
x = x.view()
if axis < 0: axis += len(x.shape)
x.shape = tuple(list(x.shape) + [1]*(axis+1-len(x.shape)))
sub = [slice(None)] * len(x.shape)
for i in range(x.shape[axis]): sub[axis] = i; self += x[sub]
return self
def plot(self, *pargs, **kwargs):
"""
Works only for an object that has accumulated information about
two-dimensional inputs. Plots an ellipse centred on the computed
mean, indicating the shape of the covariance of the distribution
of x. The size of the ellipse is specified by optional keyword
argument nstd=2.0 (any other arguments are passed through to plot).
"""###
if not self.fullcov: raise ValueError("plot method is only available for objects that compute full covariance matrices (construct with fullcov=True)")
if numpy.asarray(self.m).size != 2: raise ValueError("plot method is only available for objects that have accumulated two-dimensional data")
nstd = kwargs.pop('nstd', 2.0)
r = numpy.linspace(0, 2*numpy.pi, 100)
x = numpy.asmatrix(self.m).A + nstd*(svd(self.C).sqrtm * numpy.matrix([numpy.cos(r), numpy.sin(r)])).A
from . import Plotting; pylab = Plotting.load_pylab()
pylab.plot(x[0],x[1],*pargs,**kwargs)
pylab.draw()
class running_cov(running_mean):
"""
A class for computing means, variances and covariances online, optionally with a decay factor.
It is a subclass of running_mean for which the fullcov attribute is always initialized to True.
It is included under the separate name running_cov purely for backward compatibility. See
running_mean for more details.
"""###
def __init__(self, persistence=1.0, increment=1.0):
running_mean.__init__(self, persistence=persistence, increment=increment, fullcov=True)
class ema(running_mean):
def reset(self):
self.sumx1 = 0.0
self.sumx2 = 0.0
self.n = 0.0
self.denom = 1.0
def update(self, x, increment=None):
if increment == None: increment = self.increment
persistence = self.persistence
if persistence == 1.0: persistence = self.n / (self.n + increment)
if self.n == 0.0: persistence = 0.0
if self.fullcov:
x1 = numpy.asarray(x).flatten()
xM = numpy.asmatrix(x1)
x2 = xM.H * xM
else:
x1 = x
x2 = numpy.multiply(x, numpy.conj(x))
self.sumx1 = persistence * self.sumx1 + (1.0 - persistence) * x1
self.sumx2 = persistence * self.sumx2 + (1.0 - persistence) * x2
self.n += increment
class svd(object):
"""
d = svd(A) where A is m-by-n
The following are computed immediately, using the singular value
decomposition A = d.U * numpy.diag(d.s) * d.V.H
d.s: the singular values of A in decreasing order
d.rank: estimated rank r of A, = #{singular values > tol},
where tol defaults to max(m,n)*eps*max(d.s)
d.cond: estimated condition number of A, = max(d.s)/min(d.s)
d.U: m by r orthonormal basis for the column space of A
d.leftnull: m by min(m,n)-r the discarded columns of U
d.V: n by r orthonormal basis for the row space of A
d.null: n by min(m,n)-r the discarded columns of V
d.original: a copy of A, unless keepcopy is set to False
The following can then be computed cheaply. They are computed on demand
the first time they are requested, and then cached:
d.pinv: pseudo-inverse of A
d.sqrtm: X such that X*X.H = (U*S*U.H) and X.H*X = (V*S*V.H)
(for symmetric A, therefore, X is the matrix-square-root)
d.isqrtm: the inverse of d.sqrtm, when A is invertible: i.e. X
such that X*X.H = (U*S*U.H).I and X.H*X = (V*S*V.H).I
d.reconstruction: reconstruction of A, without using the discarded
columns of d.U and d.V
"""###
def __init__(self, A, tol=None, keepcopy=True):
A = asmatrix(A, copy=keepcopy)
(U, s, Vh) = numpy.linalg.svd(A, full_matrices=False, compute_uv=True)
if tol==None: tol = max(A.shape) * numpy.finfo(A.dtype).eps * max(s)
r = numpy.sum(s > tol)
if keepcopy: self.original = A
self.rank = r
smin,smax = min(s),max(s)
if smin: self.cond = smax/smin
else: self.cond = numpy.inf
self.U = U[:,:r] # columns of U are an orthonormal basis for the column space of A
self.s = s[:r]
self.V = Vh[:r,:].H # columns of V are an orthonormal basis for the row space of A
self.null = Vh[r:,:].H
self.leftnull = U[:,r:]
def __getattr__(self, key):
if key == 'pinv': self.__dict__[key] = self.V * numpy.diag(1.0/self.s) * self.U.H
if key == 'sqrtm': self.__dict__[key] = self.U * numpy.diag(self.s**0.5) * self.V.H # X*X.H = (U*S*U.H), X.H*X = (V*S*V.H)
if key == 'isqrtm': self.__dict__[key] = self.U * numpy.diag(self.s**-0.5) * self.V.H # X*X.H = (U*S*U.H).I, X.H*X = (V*S*V.H).I for invertible matrices
if key == 'reconstruction': self.__dict__[key] = self.U * numpy.diag(self.s) * self.V.H
if not hasattr(self, key): raise AttributeError(key)
return self.__dict__.get(key)
def _getAttributeNames(self):
return ['pinv', 'sqrtm', 'isqrtm', 'reconstruction']
def __repr__(self):
s = "<%s.%s instance at 0x%08X>" % (self.__class__.__module__,self.__class__.__name__,id(self))
s = '\n'.join([s] + [" %s: % 3d by % 3d" % (tuple([key]+list(getattr(self,key).shape))) for key in ['U', 'V']])
return s
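# Usage sketch (hypothetical matrix A): the decomposition is computed once and the
# derived quantities listed in the docstring are cached lazily on first access:
#     d = svd(A)                      # A is m-by-n
#     A_pinv = d.pinv                 # pseudo-inverse, V * diag(1/s) * U.H
#     A_lowrank = d.reconstruction    # reconstruction of A at rank d.rank
#     S = d.sqrtm                     # matrix square root, for symmetric A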
class LDAError(Exception): pass
class lda(object):
"""
Fisher's linear discriminant analysis. Example:
f = lda(ridge=0.1)
f.solve(rneg, rpos) # rneg and rpos are running_cov objects
# corresponding to the negative and positive
# classes: their .m and .C contain the
# features' means and covariances respectively.
rneg += xneg
rpos += xpos
f.rebias(rneg, rpos) # don't re-solve, but re-bias according to the
# updated estimates.
f.predict(xtest) # xtest is a sequence of features for a single
# exemplar, or a features-by-exemplars array.
f.w contains the weights
f.b contains the bias
"""###
def __init__(self, ridge=0.0):
"""
Initialize the object's self.ridge attribute, indicating
the amount of L2 regularization. The self.ridge parameter is
scaled by the mean diagonal element of the between-class
covariance matrix, then added to the diagonal.
"""###
self.ridge = ridge
def solve(self, *cc):
"""
f.solve(rneg, rpos) # rneg and rpos are running_cov objects
f.solve(d) # the running_cov objects are in a sequence
# or dict d (infer_classes is called).
"""###
self.classes,cc = infer_classes(cc, [running_cov])
if len(cc) < 2: raise LDAError('need at least two classes')
if len(cc) > 2: raise LDAError('multiclass LDA not supported')
C = sum([c.C for c in cc]) # say that fast three times
if self.ridge: C += numpy.eye(C.shape[0]) * self.ridge * C.diagonal().mean()
self.w = numpy.linalg.solve(C, cc[1].m - cc[0].m)
self.b = 0.0
self.rebias(cc)
return self
def rebias(self, *cc):
"""
f.rebias(rneg, rpos) # rneg and rpos are running_cov objects
f.rebias(d) # the running_cov objects are in a sequence
# or dict d (infer_classes is called).
"""###
classes,cc = infer_classes(cc, [running_cov])
m = sum([c.m for c in cc]) / float(len(cc))
self.b = self.b - self.predict(m)
def predict(self, x):
"""
Input is a sequence of features for a single exemplar, or a
features-by-exemplars array. Output is a real-valued decision
value per exemplar.
"""###
return asmatrix(self.w).H* asmatrix(x) + self.b
def plot(self, *cc):
classes,cc = infer_classes(cc, [running_cov])
m = sum([c.m for c in cc]) / float(len(cc))
cc[0].plot('b-')
cc[1].plot('r-')
db = [-self.w[1], self.w[0]]
from . import Plotting; pylab = Plotting.load_pylab()
xl = list(pylab.gca().get_xlim())
yl = list(pylab.gca().get_ylim())
pylab.plot([m[0]-2*db[0],m[0]+2*db[0]], [m[1]-2*db[1],m[1]+2*db[1]], 'k-')
pylab.gca().set_xlim(xl)
pylab.gca().set_ylim(yl)
class ldatest(object):
def __init__(self,x1=None,x2=None,ridge=0.0):
if x1==None: x1 = numpy.random.randn(2,1)+(numpy.matrix(numpy.random.rand(2,2))*numpy.matrix(numpy.random.randn(2,1000))).A
if x2==None: x2 = numpy.random.randn(2,1)+(numpy.matrix(numpy.random.rand(2,2))*numpy.matrix(numpy.random.randn(2,1000))).A
self.x1 = x1
self.x2 = x2
self.cc = [running_cov().run(x,1) for x in [self.x1,self.x2]]
self.s = lda(ridge=ridge).solve(self.cc)
def plot(self):
from . import Plotting; pylab = Plotting.load_pylab()
pylab.clf()
pylab.plot(self.x1[0],self.x1[1],'bx')
pylab.plot(self.x2[0],self.x2[1],'r+')
self.s.plot(self.cc)
ax = pylab.gca();
xl,yl = ax.get_xlim(),ax.get_ylim()
x = numpy.linspace(xl[0],xl[1],10)
y = numpy.linspace(yl[0],yl[1],10)
xx,yy=numpy.meshgrid(x,y)
xy = numpy.concatenate((xx.reshape((1,xx.size)),yy.reshape((1,yy.size))),axis=0)
z = self.s.predict(xy).reshape(xx.shape)
z = logistic(z)
z = numpy.array(z, order='C', dtype=numpy.float64, copy=True)
pylab.contour(x,y,z,numpy.arange(0.1,1.0,0.1))
pylab.draw()
class CSPError(Exception): pass
class csp(object):
def __init__(self, classcov, globalcov=None, whichclass=1, normalize=False):
"""
Implements the CSP algorithm of Koles (1991).
<classcov> is the class covariance matrix. Alternatively, it is a
sequence or dict of class-covariance matrices, in which case the
class labels are inferred using infer_classes(), and the matrix
corresponding to <whichclass> is used.
<globalcov> is the global covariance. If omitted, then classcov must
be a sequence or dict containing information about all relevant
classes: then <globalcov> is computed as an equal weighting between
the class of interest <whichclass>, and the mean of other classes.
<normalize> is a boolean option specifying whether to normalize
covariance matrices by their trace before use.
The output object c has the following attributes:
c.classes : the sequence of inferred classes
c.whichclass : the class of interest
c.opts : dict containing options (normalize)
c.classcov : the class covariance matrix of interest (after
normalization, if any)
c.globalcov : the global covariance matrix used (after
normalization, if any)
c.whitening : symmetric whitening or spherizing matrix
c.rotation : rotation directions (one in each column)
c.filters : spatial filters (one in each column)
c.patterns : spatial patterns (one in each column)
c.eigenvalues : eigenvalue corresponding to each column
The best() method is useful for selecting based on eigenvalue.
"""###
if not isinstance(classcov, (list,tuple,dict)): classcov = {whichclass:classcov}
self.classes,classcov = infer_classes(classcov)
if not whichclass in self.classes: raise CSPError('no information about class'+str(whichclass))
for i in range(len(classcov)):
if isinstance(classcov[i],running_mean): classcov[i] = classcov[i].m
classcov[i] = numpy.matrix(classcov[i]) # copies, and ensures matrixhood
if normalize: classcov[i] /= classcov[i].trace()
d = dict(list(zip(self.classes,classcov)))
if len(self.classes)==1: self.classes = (-1,+1)
self.whichclass = whichclass
self.opts = {'normalize':normalize}
self.classcov = d.pop(whichclass)
if globalcov==None:
if len(d) == 0: raise CSPError('too few matrices')
globalcov = 0.5 * self.classcov + 0.5 * sum(d.values())/len(d)
# (equal weighting of whichclass and the rest)
if isinstance(globalcov,running_mean): globalcov = globalcov.m
globalcov = numpy.matrix(globalcov) # copies, and ensures matrixhood
if normalize: globalcov /= globalcov.trace()
self.globalcov = globalcov
s1 = svd(self.globalcov)
self.whitening = s1.isqrtm
m = self.whitening.H * self.classcov/2.0 * self.whitening
s2 = svd(m)
self.rotation = s2.U
self.filters = self.whitening * self.rotation
self.patterns = self.globalcov * self.filters
for j in range(self.patterns.shape[1]):
pattern = self.patterns[:,j]
if numpy.max(numpy.abs(pattern)) == -numpy.min(pattern):
pattern *= -1.0
self.filters[:,j] *= -1.0
self.rotation[:,j] *= -1.0
self.eigenvalues = s2.s
def best(self, n, m=None):
"""
c.best(n) or c.best([n]) returns a list of indices to the best
n eigenvalues in the sense of absolute deviation from 0.5.
c.best(n, m) or c.best([n,m]) returns a list of indices to the
best n eigenvalues from the high end of the spectrum and the
best m eigenvalues from the low end.
Example:
ind = c.best(3, 3)
filt = c.filters[:, ind]
pat = c.patterns[:, ind]
eig = c.eigenvalues[:, ind]
"""###
n = numpy.array(n,copy=False).ravel().tolist()
if len(n) == 1 and m != None:
n += numpy.array(m,copy=False).ravel().tolist()
if len(n) == 1:
n += [0]
d = numpy.abs(self.eigenvalues - 0.5)
elif len(n) == 2:
d = self.eigenvalues
else: raise CSPError('expected either 1 or 2 numbers')
pp = [(d[i], i) for i in range(len(d))]
pp = [p[1] for p in sorted(pp, reverse=True)]
out = pp[:n[0]]
if n[1]: out += pp[-n[1]:]
return out
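# Usage sketch (hypothetical per-class spatial covariance matrices Cneg and Cpos):
#     c = csp({-1: Cneg, +1: Cpos}, whichclass=+1, normalize=True)
#     ind = c.best(3, 3)              # 3 components from each end of the eigenvalue spectrum
#     W = c.filters[:, ind]           # spatial filters, one per column
#     A = c.patterns[:, ind]          # corresponding spatial patterns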
def csp_itfe(filters, classcov=None, globalcov=None, classprob=None):
"""
For a set of spatial <filters> expressed one-per-column, a dict or
sequence of <classcov> objects ( as supplied to csp() ) return the
Information-Theoretic Feature Extraction criterion for CSP defined in:
Grosse-Wentrup and Buss (2008)
IEEE Transactions on Biomedical Engineering 55(8), 1991-2000
Optionally, a list/numpy-array/dict of class probabilities may be
supplied in <classprob>.
"""###
if isinstance(filters, csp):
c = filters
filters = c.filters
if classcov == None: classcov = (c.classcov,)
if globalcov == None: globalcov = c.globalcov
if classcov==None: raise CSPError('class covariance matrices not supplied')
if isinstance(globalcov, running_cov): globalcov = globalcov.C
classes,classcov = infer_classes(classcov, [numpy.ndarray, running_cov])
M = len(classes)
if M == 1:
if globalcov==None: raise CSPError('not enough covariance matrices')
classcov = classcov[0]
if isinstance(classcov, running_cov): classcov = classcov.C
classcov = (2.0*globalcov-classcov, classcov)
classes = (-1,1)
M = 2
#import IPython;IPython.Debugger.Tracer()()
if classprob == None:
classprob = numpy.asmatrix(numpy.ones((M,1)))/M
else:
if isinstance(classprob, numpy.ndarray):
classprob = classprob.flatten().tolist()
cl,clp = infer_classes(classprob)
if len(cl) != len(classes) or (isinstance(classprob,dict) and cl != classes):
raise CSPError('class labels are inconsistent between classcov and classprob inputs')
classprob = numpy.matrix(clp).T
W = numpy.asmatrix(filters)
nfilt = W.shape[1]
proj = numpy.asmatrix(numpy.zeros((nfilt,M)))
default_globalcov = 0.0
classcov = list(classcov)
for j in range(M):
if isinstance(classcov[j], running_cov): classcov[j] = classcov[j].C
classcov[j] = numpy.matrix(classcov[j])
C = classcov[j] / float(M)
default_globalcov = default_globalcov + C
proj[:, j].flat = numpy.diag(W.H * C * W)
if globalcov == None: globalcov = default_globalcov
nrm = numpy.diag(W.H * globalcov * W)
nrm.shape += (1,)
proj = proj / nrm
v = -numpy.log(numpy.power(proj, 0.5))*classprob - numpy.power((numpy.power(proj,2.0)-1.0)*classprob, 2.0) * 3.0/16.0
return v
def linkern(x, x2=None, exemplar_dim=0):
"""
Computes a linear kernel between data points x
(or if x2 supplied, between x on the rows and x2
on columns). x (and x2) are packed data arrays,
such that one step along dimension <exemplar_dim>
is a step from one datapoint or exemplar to the
next.
"""###
if x2 == None: x2 = x
withself = (id(x2) == id(x))
if not isinstance(x, numpy.ndarray): x = numpy.asarray(x)
if withself: x2 = x
if not isinstance(x2, numpy.ndarray): x2 = numpy.asarray(x2)
nd = max(exemplar_dim+1, len(x.shape), len(x2.shape))
if exemplar_dim < 0: exemplar_dim += nd
ax = list(range(nd))
ax.remove(exemplar_dim)
x = project(x, nd-1)
x2 = project(x2, nd-1)
return numpy.tensordot(x, x2, axes=(ax,ax))
def rbfkern(x, x2=None, exemplar_dim=0, invcov=None, sigma=1.0):
"""
Computes a Gaussian RBF kernel between data points x (or if
x2 is supplied, between x on the rows and x2 on the columns).
x (and x2) are packed data arrays, such that one step along
dimension <exemplar_dim> is a step from one datapoint or
exemplar to the next. The argument <invcov>, if any, is
passed through to sqdist() for computing distances, and
<sigma> is the length scale by which the result is divided.
"""###
d2 = sqdist(x=x, x2=x2, exemplar_dim=exemplar_dim, invcov=invcov)
d2 /= -2.0 * sigma ** 2.0
d2 = numpy.exp(d2)
return d2
def guesswidth(x=None, y=None, dsq=None, exemplar_dim=0, invcov=None, norm=2, return_rbfkern=False):
"""
Based on data <x>, and optionally on labels <y>, guess the optimal length scale for
an RBF kernel (and for return_rbfkern=True, return said kernel in addition to the
estimated length scale).
Options <exemplar_dim> and <invcov> are passed through to sqdist() in order to
compute squared distances if you supply <x>. However, for efficiency reasons you
may wish to precompute the squared distances, passing them in as <dsq> instead of
<x>, in which case these options do nothing.
More generally, you can base your width estimate on other norms besides the 2-norm
distance, by setting <norm> to something other than 2. Then, instead of squared
distances, you may pass in <dsq> your pre-computed matrix of
dsq[i,j] = sum_over_k ( abs(x[i][k] - x[j][k]) ** norm )
where k indexes the features in each exemplar. In other words, the elements of dsq
should be p-norms-raised-to-the-power-p.
"""###
if dsq == None and x == None: raise ValueError("must supply either x or dsq")
if dsq != None and x != None: raise ValueError("supply either x or dsq, but not both")
if dsq == None:
dsq = sqdist(x, exemplar_dim=exemplar_dim, invcov=invcov)
if norm != 2: raise RuntimeError("currently, if you want to use norm values other than 2, you must precompute the matrix and pass it in as dsq")
keep = numpy.logical_not(numpy.eye(dsq.shape[0], dtype=numpy.bool))
if y != None:
for i,yi in enumerate(y):
for j,yj in enumerate(y):
keep[i,j] = isequal(yi,yj)
a = dsq.flat[keep.flatten()]
sigma = numpy.median(a) ** (1.0/norm)
if return_rbfkern:
if x == None: dsq = dsq.copy() # don't modify original if that was passed in
dsq /= -norm * sigma ** norm
rbfK = dsq = numpy.exp(dsq)
return sigma, rbfK
else:
return sigma
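# Usage sketch (hypothetical feature array x, one exemplar per row): a common pattern is
# to let guesswidth() pick the RBF length scale from the median inter-exemplar distance,
# optionally returning the kernel matrix in the same call:
#     sigma = guesswidth(x)
#     K = rbfkern(x, sigma=sigma)
#     # or, in one step:  sigma, K = guesswidth(x, return_rbfkern=True)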
def sqdist(x, x2=None, exemplar_dim=0, invcov=None):
"""
Computes a matrix of squared Euclidean distances
between data points x (or if x2 supplied, between
x on the rows and x2 on columns). x (and x2) are
packed data arrays, such that a step along dimension
<exemplar_dim> is a step from one datapoint or
exemplar to the next.
If supplied, <invcov> should be a symmetric matrix.
The distance is then computed in the space
transformed by this matrix. If p and q are m-by-1
numpy.matrix representations of two data points,
then the distance between them is computed as:
(p-q).T * invcov * (p-q)
If <invcov> is truly the inverse of the covariance
matrix between the features of <x>, then the result
of the function is the Mahalanobis distance.
"""###
if x2 == None: x2 = x
withself = (id(x2) == id(x))
if not isinstance(x, numpy.ndarray): x = numpy.asarray(x)
if withself: x2 = x
if not isinstance(x2, numpy.ndarray): x2 = numpy.asarray(x2)
nd = max(exemplar_dim+1, len(x.shape), len(x2.shape))
if exemplar_dim < 0: exemplar_dim += nd
ax = list(range(nd))
ax.remove(exemplar_dim)
x = project(x, nd-1)
x2 = project(x2, nd-1)
if invcov == None:
xmag = numpy.multiply(x, x)
for dim in ax: xmag = numpy.expand_dims(xmag.sum(axis=dim), dim)
if withself:
x2mag = xmag.view()
else:
x2mag = numpy.multiply(x2, x2)
for dim in ax: x2mag = numpy.expand_dims(x2mag.sum(axis=dim), dim)
xmag.shape = (xmag.size,1)
x2mag.shape = (1,x2mag.size)
return xmag + x2mag - 2 * numpy.tensordot(x, x2, axes=(ax,ax))
else:
A = numpy.asmatrix(invcov)
if exemplar_dim == 0: p = x # project() has already made a view of x
else: p = x.swapaxes(0, exemplar_dim)
		p.shape = (p.shape[0], p.size // p.shape[0])
p = numpy.asmatrix(p)
pA = p * A
pmag = numpy.multiply(p, pA).sum(axis=1)
if withself:
q, qA, qmag = p, pA, pmag.view()
else:
if exemplar_dim == 0: q = x2 # project() has already made a view of x2
else: q = x2.swapaxes(0, exemplar_dim)
			q.shape = (q.shape[0], q.size // q.shape[0])
q = numpy.asmatrix(q)
qA = q * A
qmag = numpy.multiply(q, qA).sum(axis=1)
pmag.shape = (pmag.size, 1)
qmag.shape = (1, qmag.size)
return pmag + qmag - 2 * pA * q.T
# NB: this assumes A is symmetric --- really it's -p * (A+A.T) * q.T
def kview(K, y=None):
from . import Plotting; pylab = Plotting.load_pylab()
import matplotlib
pylab.clf()
K = numpy.asarray(K)
if y == None:
fdiff = []
else:
if K.shape[1] != K.shape[0]: raise ValueError("K must be square to reorder by label")
order = list(zip(*sorted([(yi,i) for i,yi in enumerate(y)])))[1]
y = numpy.asarray(y)[order,:]
K = K[order,:][:,order]
fdiff = numpy.where(numpy.diff(y, axis=0))[0] + 0.5
Plotting.imagesc(K)
for d in fdiff:
pylab.plot(pylab.gca().get_xlim(), [d,d], linewidth=2, linestyle='--', color=(0.0,1.0,0.0), scalex=False, scaley=False)
pylab.plot([d,d], pylab.gca().get_ylim(), linewidth=2, linestyle='--', color=(0.0,1.0,0.0), scalex=False, scaley=False)
pylab.colorbar()
pylab.draw()
def confuse(true, predicted, labels=None, exemplar_dim=0):
"""
Returns a confusion matrix and a list of unique labels, based on paired lists of
true and predicted labels.
Output rows correspond to the possible true labels and columns correspond to the
possible predicted labels. This is the ordering assumed in, for example,
balanced_loss().
"""###
true = numpy.asarray(true)
predicted = numpy.asarray(predicted)
nd = max(exemplar_dim+1, len(true.shape), len(predicted.shape))
if exemplar_dim < 0: exemplar_dim += nd
true = true.swapaxes(exemplar_dim, 0)
predicted = predicted.swapaxes(exemplar_dim, 0)
if len(true) != len(predicted): raise ValueError('mismatched numbers of true and predicted labels')
def isequal(a,b):
if isinstance(a, str) and isinstance(b, str): return a == b
a = numpy.asarray(a)
b = numpy.asarray(b)
ndd = len(b.shape) - len(a.shape)
if ndd > 0: a.shape += (1,) * ndd
if ndd < 0: b.shape += (1,) * -ndd
if a.shape != b.shape: return False
return numpy.logical_or(a == b, numpy.logical_and(numpy.isnan(a), numpy.isnan(b))).all()
def find(a, b, append=False):
for i in range(len(b)):
if isequal(a, b[i]): return i
if append: b.append(a); return len(b)-1
else: return None
n = len(true)
c = {}
found_labels = []
for i in range(n):
tv,pv = true[i],predicted[i]
ti = find(tv, found_labels, append=True)
pi = find(pv, found_labels, append=True)
key = (ti,pi)
c[key] = c.get(key,0) + 1
if labels == None:
labels = list(found_labels)
try: labels.sort()
except: pass
else:
labels = list(labels)
for fi in found_labels:
if find(fi, labels) == None: raise ValueError('inputs contain labels not listed in the <labels> argument')
nclasses = len(labels)
C = numpy.zeros((nclasses, nclasses), dtype=numpy.float64)
for i in range(nclasses):
ti = find(labels[i], found_labels, append=False)
if ti == None: continue
for j in range(nclasses):
tj = find(labels[j], found_labels, append=False)
if tj == None: continue
C[i,j] = c.get((ti,tj), 0)
return C,labels
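# Usage sketch (hypothetical label sequences ytrue and ypred): the confusion matrix has
# true labels on the rows and predicted labels on the columns, and can be passed straight
# to the loss functions below:
#     C, labels = confuse(true=ytrue, predicted=ypred)
#     err, se = balanced_loss(confusion_matrix=C)   # mean of the per-class error rates
#     err, se = class_loss(confusion_matrix=C)      # overall misclassification rate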
def balanced_loss(true=None, predicted=None, confusion_matrix=None):
"""
err, se = balanced_loss(true, predicted)
err, se = balanced_loss(confusion_matrix=C)
where C = confuse(true, predicted)
A classification loss function. As in confuse(), each row of <true> or
<predicted> is a label for one instance or data point.
balanced_loss() is asymmetric with regard to its inputs: it is the mean
of the misclassification rates on each of the classes (as per <true>).
"""###
if confusion_matrix == None:
predicted = numpy.asarray(predicted).flatten()
if (predicted > numpy.floor(predicted)).any(): predicted = numpy.sign(predicted)
confusion_matrix,labels = confuse(true=true, predicted=predicted)
confusion_matrix = numpy.asarray(confusion_matrix, dtype=numpy.float64)
hits = confusion_matrix.diagonal()
totals = confusion_matrix.sum(axis=1)
hits = hits[totals != 0]
totals = totals[totals != 0]
err = 1 - (hits /totals)
ste = (err * (1 - err) / (totals-1)) ** 0.5
n = totals.min() # combine means and standard errors as if all classes had the same number of members as the smallest class
# (for means, that's just a flat average of error rates across classes; for standard errors it's the most conservative way to do it)
d = [{'mean':err[i], 'ste':ste[i], 'n':n} for i in range(len(totals))]
d = seqste(*d)
return d['mean'],d['ste']
def class_loss(true=None, predicted=None, confusion_matrix=None):
"""
err, se = class_loss(true, predicted)
err, se = class_loss(confusion_matrix=C)
where C = confuse(true, predicted)
Actually class_loss() is symmetrical in its input arguments, but the
order (true, predicted) is the convention established elsewhere,
e.g. in balanced_loss()
A classification loss function. As in confuse(), each row of <true>
or <predicted> is a label for one instance or data point.
"""###
if confusion_matrix == None:
predicted = numpy.asarray(predicted).flatten()
if (predicted > numpy.floor(predicted)).any(): predicted = numpy.sign(predicted)
confusion_matrix,labels = confuse(true=true, predicted=predicted)
confusion_matrix = numpy.asarray(confusion_matrix, dtype=numpy.float64)
n = confusion_matrix.sum()
err = 1 - confusion_matrix.trace() / n
se = (err * (1 - err) / (n-1)) ** 0.5
return err,se
def eeop(f,y):
"""
A one-dimensional classifier: given values f and binary labels y (+/- 1),
return the equal-error operating point.
"""###
e = {
-1:numpy.zeros((len(f),), dtype=numpy.float64),
+1:numpy.zeros((len(f),), dtype=numpy.float64),
}
y = numpy.sign(y)
z = sorted(zip(f,y))
for i,(fi,yi) in enumerate(z):
if yi == 0: continue
if i:
e[yi][i] = e[yi][i-1] + 1
e[-yi][i] = e[-yi][i-1]
else:
e[yi][i] = 1
for k in e: e[k] /= e[k][-1]
e[+1] = 1 - e[+1]
de = abs(e[+1] - e[-1])
de = de[:-1]
mde = min(de)
ind = numpy.where(de==mde)[0]
#import Plotting; pylab = Plotting.load_pylab(); pylab.subplot(1,2,1); Plotting.plot(z); Plotting.plot(numpy.c_[e[-1],e[+1]], hold=1); pylab.subplot(1,2,2)
f,y = list(zip(*z))
f = numpy.asarray(f)
return numpy.mean([f[ind].mean(), f[ind+1].mean()])
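# Usage sketch (hypothetical decision values f and labels y in {-1,+1}): eeop() returns
# the threshold at which the two error rates are as close to equal as possible, so that
#     b = eeop(f, y)
#     predicted = numpy.sign(numpy.asarray(f) - b)
# thresholds the decision values at the equal-error operating point.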
def kern(x, x2=None, spec=None, **kwargs):
spec = sstruct(spec)
spec._update(kwargs)
func = spec.func
del spec.func
if spec._fields == ['params']: spec = spec.params
spec = dict(spec._allitems())
return func(x, x2, **spec)
def demodata(n=(80,40), randseed=None):
if isinstance(n, int): n = (int(n/2.0), int(n/2.0)+int(int(n/2.0) < n/2.0))
n1,n2 = n
m1 = [0.75, 0]
S1 = [[1, -0.3], [-0.3, 1]]
m2 = [-0.75, 0]
S2 = [[1, 0.95], [0.95, 1]]
oldrandstate = None
if randseed != None:
oldrandstate = numpy.random.get_state();
if isinstance(randseed, int): numpy.random.seed(randseed)
else: numpy.random.set_state(randseed)
x1 = numpy.random.randn(n1,2) * svd(S1).sqrtm + m1
x2 = numpy.random.randn(n2,2) * svd(S2).sqrtm + m2
if oldrandstate != None:
numpy.random.set_state(oldrandstate)
x = numpy.r_[x1, x2]
y = [-1] * n1 + [+1] * n2
return numpy.asarray(x),numpy.asarray(y)
class predictor(sstruct):
"""
Virtual superclass for classification and regression problems.
klr2class is an example of a working subclass, for classification.
# example 1: single training, kernel computed by hand
from SigTools.LearningTools import *
xtrain,ytrain = demodata()
c = klr2class(C=1.0, relcost='balance', lossfunc=balanced_loss)
s = guesswidth(xtrain,ytrain)
K = rbfkern(xtrain, sigma=s)
c.train(K=K, y=ytrain)
print c
xtest,ytest = demodata()
K_testtrain = rbfkern(xtest, xtrain, sigma=s)
c.test(K=K_testtrain, true=ytest)
print c
# example 2: using a kernel function inside the cross-validation loop
from SigTools.LearningTools import *
xtrain,ytrain = demodata()
c = klr2class(relcost='balance', lossfunc=balanced_loss)
c.varyhyper({'C':[10.0,1.0,0.1], 'kernel.func':rbfkern, 'kernel.sigma':[0.5,1.0,1.5,2.0]})
c.train(x=xtrain, y=ytrain)
xtest,ytest = demodata()
c.test(x=xtest, true=ytest)
c.plotd()
"""###
def __init__(self, hyper=None, model=None, output=None, verbosity=2, lossfunc=class_loss, lossfield='y'):
sstruct.__init__(self)
self.input = sstruct()
self.input.x=None
self.input.K=None
self.input.y=None
self.input.istrain=None
self.input.istest=None
self.hyper = sstruct(hyper)
self.model = sstruct(model)
self.output = sstruct(output)
self.loss = sstruct()
self.update_loss(func=lossfunc, field=lossfield, training=False, testing=False)
self.verbosity = verbosity
self._allowedfields = list(self._fields) + ['cv']
def write(self, txt):
import sys
sys.stdout.write(txt)
sys.stdout.flush()
def update_loss(self, func=None, field=None, training=True, testing=True):
self.loss.func = getattr(self.loss, 'func', None)
self.loss.field = getattr(self.loss, 'field', None)
newfunc = (func != None and func != self.loss.func)
newfield = (field != None and field != self.loss.field)
if newfunc: self.loss.func = func
if newfield: self.loss.field = field
if training or newfunc or newfield:
self.loss.train = None
self.loss.train_se = None
if testing or newfunc or newfield:
self.loss.test = None
self.loss.test_se = None
if self.loss.func != None and (training or testing):
if not hasattr(self.output, self.loss.field): raise RuntimeError("could not find subfield 'output.%s'" % self.loss.field)
out = getattr(self.output, self.loss.field)
istrain = self.input.istrain
istest = self.input.istest
if training and istrain != None and istrain.any(): self.loss.train, self.loss.train_se = self.loss.func(true=self.input.y[istrain], predicted=out[istrain])
if testing and istest != None and istest.any(): self.loss.test, self.loss.test_se = self.loss.func(true=self.input.y[istest], predicted=out[istest])
return self
def pretrain(self): pass
def training(self): raise RuntimeError("no training() method implemented")
def testing(self): raise RuntimeError("no testing() method implemented")
def doublecv(self, x=None, K=None, y=None, istrain=None, istest=None, outerfg=None, **innerkwargs):
"""
All-singing all-dancing double-nested cross-validated training/evaluation procedure.
To train/test once, as per cvtrain(), supply <istrain> and/or <istest>.
To train/test on multiple folds, supply a foldguide object as <outerfg>.
To perform a hyperparameter grid-search during cross-validation within each
training set, make sure that self.hyper is an experiment object (for example,
use the varyhyper() method to do this). Extra **kwargs are passed on to
cvtrain() which uses them in the construction of a foldguide object, to dictate
how the inner folding is performed. This includes <randomseed>, which is
handled in a special way: a different inner <randomseed> is used for each
outer fold (the seed is incremented by 1 from one outer fold to the next),
but the same <randomseed> is used across all grid-search conditions within
a given outer fold: this ensures that each hyperparameter setting is tested
on exactly the same train/test splits as each other.
The return value is a list of trained copies of <self>, one per outer-fold.
If grid-search was performed, each object has .model, .output and .loss fields
corresponding to the best-in-grid hyperparameter settings for that fold. An
additional substruct .cv gives some insight into this decision.
Note that it is often convenient to conceptualize inner-cv-with-hyperparameter-
grid-search as an integral part of an algorithm's "training" procedure.
Therefore, if an object has an experiment object in self.hyper, this function
will also be called automatically (in one-outer-fold-only mode) if you call
self.train() or self.cvtrain(), these two being synonymous for such objects.
The difference to calling doublecv() directly is merely that the attributes
of self are then actually updated to match the chosen setting, instead of a
copy being returned.
"""###
folded = (outerfg != None)
varied = isinstance(self.hyper, experiment)
if folded:
n_outerfolds = len(outerfg)
if istest != None or istrain != None: raise ValueError("either supply outerfg, or (istrain and/or istest), but not a mixture")
else:
n_outerfolds = 1
if varied:
expt = self.hyper
else:
expt = [self.hyper]
if 'cv' in self._fields: del self.cv
n_conditions = len(expt)
x,K,y,istrain,istest = self.resolve_training_inputs(x=x,K=K,y=y,istrain=istrain,istest=istest)
if folded and not isequal(outerfg.ids, list(range(len(y)))): raise ValueError("outerfg has the wrong ids for this dataset")
z = numpy.zeros((n_conditions,n_outerfolds), dtype=numpy.float64)
inner_loss = z + numpy.nan
inner_se = z + numpy.nan
outer_loss = z + numpy.nan
outer_se = z + numpy.nan
chosen = [None for i in range(n_outerfolds)]
chosenind = [None for i in range(n_outerfolds)]
if 'ntrain' not in innerkwargs and 'ntest' not in innerkwargs and 'folds' not in innerkwargs: innerkwargs['folds'] = 10
innerseed = innerkwargs.pop('randomseed', None)
if innerseed == None:
innerseed = [foldguide.next_randomseed() for ifold in range(n_outerfolds)]
if isinstance(innerseed, (int,float)):
innerseed = [innerseed + ifold for ifold in range(n_outerfolds)]
import inspect
innernames,xx,yy,innerdefaults = inspect.getargspec(foldguide.__init__)
folding = dict([(k,v) for k,v in zip(innernames[1:], innerdefaults) if v != None])
folding.update(innerkwargs)
prevkernel = None
makekernel = 'kernel' in expt[0]._fields
input_for_kernel = None
if makekernel:
if K != None: self.input.K = K = None
#if K != None: raise ValueError("since hyper.kernel field exists, input argument K is unexpected")
if x == None: raise ValueError("since hyper.kernel field exists, input argument x is expected")
if expt._order == 'C': expt._reorder_fields('kernel', 0)
elif expt._order == 'F': expt._reorder_fields('kernel', -1)
input_for_kernel = x
x = None
for icond,condition in enumerate(expt):
if self.verbosity and varied:
self.write('hyper[%d], condition %d of %d (%s):\n' % (icond, icond+1, len(expt), expt._shortdesc(condition)))
self_c = self.copy(deep=False)
self_c.hyper = condition
if makekernel:
if not isequal(condition.kernel, prevkernel):
K = kern(x=input_for_kernel, spec=condition.kernel)
del condition.kernel
for ifold in range(n_outerfolds):
if folded: istrain,istest = outerfg[ifold]
self_cf = self_c.copy(deep=False)
self_cf.cvtrain(x=x,K=K, y=y, istrain=istrain, istest=istest, randomseed=innerseed[ifold], **innerkwargs)
isbest = (chosen[ifold] == None)
isbest = isbest or self_cf.loss.train < chosen[ifold].loss.train
isbest = isbest or (self_cf.loss.train == chosen[ifold].loss.train and self_cf.loss.train_se < chosen[ifold].loss.train_se)
if isbest:
chosen[ifold] = self_cf
chosenind[ifold] = icond
inner_loss[icond,ifold] = self_cf.loss.train
inner_se[icond,ifold] = self_cf.loss.train_se
outer_loss[icond,ifold] = self_cf.loss.test
outer_se[icond,ifold] = self_cf.loss.test_se
for ifold in range(n_outerfolds):
self_f = chosen[ifold]
if self_f == None: continue
if 'cv' in self_f._fields: del self_f.cv
if self_f.input.x == None: self_f.input.x = input_for_kernel
self_f._setfield('cv.folding', dict(folding, randomseed=innerseed[ifold]))
self_f._setfield('cv.chosen.index', chosenind[ifold])
self_f._setfield('cv.chosen.hyper', expt[chosenind[ifold]])
self_f._setfield('cv.inner.mean', list(inner_loss[:,ifold]))
self_f._setfield('cv.inner.se', list(inner_se[:,ifold]))
self_f._setfield('cv.outer.mean', list(outer_loss[:,ifold]))
self_f._setfield('cv.outer.se', list(outer_se[:,ifold]))
self_f.hyper = self.hyper.copy(deep=True)
self_f._reorder_fields('verbosity', -1)
if self.verbosity and varied:
for ifold,self_f in enumerate(chosen):
if folded: self.write('outerfg[%d] - ' % (ifold))
fname, outerstr = self_f.loss_str(field='test')
fname, innerstr = self_f.loss_str(field='train')
self.write("picked hyper[%d] (%s): %s = outer %s; inner %s\n" % (
self_f.cv.chosen.index,
expt._shortdesc(self_f.cv.chosen.hyper),
fname,
outerstr,
innerstr,
))
return chosen
def loss_str(self, field=None, mean=None, se=None):
fname = self.loss.func.__name__.replace('_', ' ')
if field != None:
if mean == None: mean = self.loss._getfield(field)
if se == None: se = self.loss._getfield(field + '_se')
if fname in ['balanced loss', 'class loss']:
fmt = '%4.1f%%'
if mean != None: mean *= 100.0
if se != None: se *= 100.0
else:
fmt = '%.3f'
if mean == None:
s = 'N/A'
else:
s = fmt % mean
if se != None: s += (' +/- ' + fmt) % se
return fname, s
def defaults(self): return {}
def varyhyper(self, *pargs, **kwargs):
if not isinstance(self.hyper, experiment):
self.hyper = experiment([(k,[v]) for k,v in self.hyper._allitems()])
if len(kwargs) == 0 and len(pargs) == 0: kwargs = self.defaults()
for arg in pargs:
if hasattr(arg, '_allitems'): arg = arg._allitems()
elif hasattr(arg, 'items'): arg = list(arg.items())
for k,v in arg: self.hyper._setfield(k,v)
for k,v in list(kwargs.items()): self.hyper._setfield(k, v)
return self
def fixhyper(self, *pargs, **kwargs):
if 'cv' in self._fields:
self.hyper = self.cv.chosen.hyper
del self.cv
elif isinstance(self.hyper, experiment):
if len(self.hyper) == 1: self.hyper = self.hyper[0]
elif len(pargs) == 0 and len(kwargs) == 0: raise ValueError("cannot fix hyperparameters---no CV has been performed")
for arg in pargs:
if hasattr(arg, '_allitems'): arg = arg._allitems()
elif hasattr(arg, 'items'): arg = list(arg.items())
for k,v in arg: self.hyper._setfield(k,v)
for k,v in list(kwargs.items()): self.hyper._setfield(k, v)
return self
def train(self, x=None, K=None, y=None, istrain=None, istest=None, **kwargs):
if isinstance(self.hyper, experiment): return self.cvtrain(x=x, K=K, y=y, istrain=istrain, istest=istest, **kwargs)
printtime = kwargs.pop('printtime', False)
if printtime: import time; tstart = time.time()
if len(kwargs): raise TypeError("unexpected keyword arguments---use these only when self.hyper is an experiment object, so train() and cvtrain() are synonymous")
if 'kernel' in self.hyper._fields: self.input.K = None
x,K,y,istrain,istest = self.resolve_training_inputs(x=x, K=K,y=y,istrain=istrain,istest=istest)
input_for_kernel = x
if 'kernel' in self.hyper._fields:
if x == None: raise ValueError("since hyper.kernel field exists, input x is expected")
if K != None: raise ValueError("since hyper.kernel field exists, input K is unexpected")
K = kern(x=x, spec=self.hyper.kernel)
x = None
for f in set(self.input._fields): setattr(self.input, f, None)
# subclass method training() will *only* see the training set: precludes label leakage
if istrain.all():
self.input.x = x
self.input.K = K
self.input.y = y
self.input.istrain = istrain
self.input.istest = istest
else:
if x != None: self.input.x = numpy.asarray(x)[istrain]
if K != None: self.input.K = K[istrain,:][:, istrain]
self.input.y = y[istrain]
self.input.istrain = istrain[istrain]
self.input.istest = istest[istrain]
for f in set(self.model._fields): setattr(self.model, f, None)
for f in set(self.output._fields): setattr(self.output, f, None)
if self.input.K == None: self.training(x=self.input.x, y=self.input.y)
else: self.training(K=self.input.K, y=self.input.y) # should set model fields
if K == None: ntrain = len(x)
else: ntrain = K.shape[1]
self.input.x = x
if K != None:
if istrain[:ntrain].all(): self.input.K = K
else: self.input.K = K[:, istrain]
self.input.y = None
self.input.istrain = istrain
self.input.istest = istest
for f in set(self.output._fields): setattr(self.output, f, None)
if self.input.K == None: self.testing(x=self.input.x)
else: self.testing(K=self.input.K) # should set output fields
if x == None: x = input_for_kernel
self.input.x = x
self.input.K = K
self.input.y = y
self.input.istrain = istrain
self.input.istest = istest
self.update_loss(training=True, testing=True)
if self.verbosity >= 2: self.write(self.summarystr()[0] + '\n')
elif self.verbosity >= 1: self.write(self.summarystr()[1])
if printtime: print("%.2f seconds" % (time.time() - tstart))
return self
def test(self, x=None, K=None, true=None):
if x == None and K == None: raise ValueError("either K or x must be supplied")
self.fixhyper()
if 'kernel' in self.hyper._fields and K==None:
K = kern(x=x, x2=self.input.x, spec=self.hyper.kernel)
if K == None:
nnew = len(x)
full_K = None
else:
K = numpy.asmatrix(K, dtype=self.input.K.dtype)
if K.shape[1] != self.input.K.shape[1]: raise ValueError("test/train kernel must have the same number of columns as the kernel used for training (=%d)" % self.input.K.shape[1])
nnew = K.shape[0]
full_K = numpy.r_[self.input.K, K]
if x == None:
full_x = None
self.input.x = None
else:
if isinstance(self.input.x, list): full_x = self.input.x + list(x)
else: full_x = numpy.concatenate((self.input.x,x), axis=0)
self.loss.test = self.loss.test_se = None
got_true = (true != None)
if not got_true:
true = numpy.zeros((nnew,)+self.input.y.shape[1:], dtype=float)
true.flat = numpy.nan
self.loss.test = self.loss.test_se = None
full_y = numpy.r_[self.input.y, project(true, len(self.input.y.shape)-1)]
full_istrain = numpy.r_[self.input.istrain, numpy.zeros((nnew,), dtype=bool)] # numpy.bool alias was removed in NumPy 1.24: use the builtin bool
full_istest = numpy.r_[self.input.istest * False, numpy.ones((nnew,), dtype=bool)]
if K != None:
kcols = full_K.shape[1]
if full_istrain[:kcols].all(): self.input.K = full_K
else: self.input.K = full_K[:, full_istrain[:kcols]]
self.input.x = full_x
self.input.y = None
self.input.istrain = full_istrain
self.input.istest = full_istest
if K == None: self.testing(x=self.input.x)
else: self.testing(K=self.input.K)
self.input.x = full_x
self.input.K = full_K
self.input.y = full_y
self.input.istrain = full_istrain
self.input.istest = full_istest
self.update_loss(training=False, testing=got_true)
return self
def cvtrain(self, x=None, K=None, y=None, istrain=None, istest=None, **kwargs):
"""
train() once on the outer fold (dictated by <istrain> and <istest> as
usual), reporting the test results in self.loss.test. In the
self.output fields (for example, self.output.f) the elements
self.output.f[self.input.istest] are as normal.
Reported performance on the training set (both in self.loss.train
and in self.output.*[self.input.istrain] ) comes from a cross-validation
within the <istrain> set.
Additional **kwargs are passed through to the construction of a
foldguide() object.
"""###
if isinstance(self.hyper, experiment):
if kwargs.get('outerfg', None) != None: raise ValueError("outerfg is only a valid argument when doublecv() is called directly, not via train() or cvtrain()")
cc = self.doublecv(x=x,K=K,y=y,istrain=istrain,istest=istest, **kwargs)
if len(cc) != 1: raise RuntimeError("multiple outputs from doublecv--this shouldn't happen")
chosen = cc[0]
self.input = chosen.input
self.model = chosen.model
self.output = chosen.output
self.loss = chosen.loss
self.cv = chosen.cv
self._reorder_fields('verbosity', -1)
return self
outer = self
outer.verbosity -= 1
verb = outer.verbosity
indent = ' '
if verb == 1: self.write(indent)
if verb == 1: numstr = indent
outer.train(x=x, K=K, y=y, istrain=istrain, istest=istest)
del istrain, istest
sep = ' // '
if verb == 1: self.write(sep)
if verb == 1: numstr += outer.summarystr()[2] + sep
outer.verbosity += 1
x, K, y = outer.input.x, outer.input.K, outer.input.y
visible = numpy.where(outer.input.istrain)[0]
for f,v in self.output._allitems():
v[self.input.istrain] = numpy.nan
#inner = []
fg = foldguide(ids=visible, labels=y[visible], **kwargs)
#print ' '.join(['%d'%iii for iii in fg[0][1]])
result = {'mean':0.0, 'ste':0.0, 'n':0}
for foldnumber,(tr,val) in enumerate(fg):
each = self.copy()
#inner.append(each)
if 'kernel' in each.hyper._fields and K != None: del each.hyper.kernel
each.verbosity -= 1
each.train(x=x, K=K, y=y, istrain=tr, istest=val)
if verb == 1: numstr += each.summarystr()[2]
each.verbosity += 1
result = seqste(result, {'mean':each.loss.test, 'ste':each.loss.test_se, 'n':len(val)})
for f,v in self.output._allitems():
v[each.input.istest] = each.output._getfield(f)[each.input.istest]
self.loss.train = result['mean']
self.loss.train_se = result['ste']
if verb == 1: self.write(' (avg=%.3f)\n' % self.loss.train)
if verb == 1: self.write(numstr + '\n')
return self
def featureweight(self, x=None, primalname='weights', dualname='alpha'):
if x == None:
return self.model._getfield(primalname)
trainind = numpy.where(self.input.istrain)[0]
if len(x) == len(trainind):
trainind = list(range(len(x)))
elif len(x) != self.input.K.shape[0]:
raise ValueError("wrong number of input data points x")
w = None
alpha = self.model._getfield(dualname)
for i,a in zip(trainind, alpha.flat):
wi = x[i] * a
if w == None: w = wi
else: w += wi
return w
def summarystr(self):
ntrain = sum(self.input.istrain)
ntest = sum(self.input.istest)
longstr = self.__class__.__name__
shortstr = ' (?????) '
if ntrain == 0:
longstr = '%s (untrained)' % longstr
else:
longstr = '%s trained on %d' % (longstr, ntrain)
if self.loss.train != None:
longstr = '%s (%s = %.3f)' % (longstr, self.loss.func.__name__, self.loss.train)
shortstr = '(%.3f)' % self.loss.train
if ntest:
longstr = '%s, tested on %d' % (longstr, ntest)
if self.loss.test != None:
longstr = '%s (%s = %.3f)' % (longstr, self.loss.func.__name__, self.loss.test)
shortstr = ' %.3f ' % self.loss.test
numstr = '%d:%d ' % (ntrain,ntest)
shortstr = shortstr.rjust(max(len(shortstr), len(numstr)))
numstr = numstr.rjust(max(len(shortstr), len(numstr)))
return longstr,shortstr,numstr
def resolve_training_inputs(self, x=None, K=None, y=None, istrain=None, istest=None):
if x == None: x = self.input.x
if K == None: K = self.input.K
if y == None: y = self.input.y
if istrain == None and istest == None:
istrain = self.input.istrain
istest = self.input.istest
if x == None and K == None: raise ValueError("no data and no kernel supplied")
#if x != None and K != None: raise ValueError("supply data or kernel, but not both")
if y == None: raise ValueError("no labels supplied")
if K == None:
ntest = ntrain = len(x)
ksiz = (ntest,ntrain)
else:
K = numpy.asmatrix(K)
ntest,ntrain = ksiz = K.shape
if not isinstance(y, numpy.ndarray): y = numpy.asarray(y)
y = y.view()
while len(y.shape) < 2: y.shape = y.shape + (1,)
nlabels = len(y)
if nlabels != ntest: raise ValueError("the number of labels must match the number of rows in the kernel")
use_explicit_istest = False
if istrain == None:
istrain = numpy.arange(max(ksiz)) < min(ksiz)
use_explicit_istest = True
istrain = numpy.asarray(istrain)
if istrain.size != max(istrain.shape): raise ValueError("istrain must be a vector")
istrain = istrain.flatten()
if istrain.dtype != bool:
ind = istrain
istrain = numpy.arange(max(ksiz)) < 0
istrain[ind] = True
if istrain.size not in ksiz:
if K == None: raise ValueError("x and istrain have mismatched number of points")
else: raise ValueError("K and istrain have mismatched number of points")
if istrain.size < nlabels: istrain = numpy.r_[istrain, [False] * (nlabels - istrain.size)]
if istest == None: istest = numpy.logical_not(istrain)
istest = numpy.asarray(istest)
if istest.size != max(istest.shape): raise ValueError("istest must be a vector")
istest = istest.flatten()
if istest.dtype != bool:
ind = istest
istest = numpy.arange(max(ksiz)) < 0
istest[ind] = True
if istest.size not in ksiz: raise ValueError("K and istest have mismatched number of points")
if istest.size < nlabels: istest = numpy.r_[istest, [False] * (nlabels - istest.size)]
if use_explicit_istest: istrain = numpy.logical_and(istrain, numpy.logical_not(istest))
if (numpy.where(istrain)[0] > min(ksiz)).any(): raise ValueError("training points are missing from K")
if (numpy.where(istest)[0] > ntest).any(): raise ValueError("testing points are missing from K")
if numpy.logical_and(istrain, istest).any(): raise ValueError("some points are designated both training and testing")
return x,K,y,istrain,istest
def calibrate(self, link=logistic, balance=True):
from .Optimization import psifit
p = psifit(link=link, balance=balance, x=self.output.f[self.input.istrain], y=self.input.y[self.input.istrain])
p.fix(logyoked=-numpy.inf)
p.fix(loglower=-numpy.inf, logupper=-numpy.inf)
p.fix(shift=0)
p.free(logscale=0.0)
p.optimize()
fac = numpy.exp(p.logscale)
if 'alpha' in self.model._fields: self.model.alpha *= fac
if 'weights' in self.model._fields: self.model.weights *= fac
self.model.bias *= fac
self.output.f *= fac
# f' = k f
# f = a x + b = a (x - s)
# f' = a'x + b' = a'(x - s')
# where f' = kf, so a' = ka and b' = kb, so s' = s = -b/a, so the inflection point (x=s,f=0) hasn't moved (x=s, f still=0).
# you may think this obvious, but somehow it took me a while to wrap my head around it: the "bias" is not the same as the shift.
if 'p' in self.output._fields: self.output.p = link(self.output.f)
self.update_loss()
return self
def rebias(self):
if self.verbosity:
c,classes = confuse(self.input.y, self.output.y); err = 1.0 - c.diagonal() / c.sum(axis=1)
print("before rebias: bias = %g; train/CV err on %+d = %.3f; train/CV err on %+d = %.3f, train/CV %s = %.3f" % (self.model.bias, classes[0], err[0], classes[1], err[1], self.loss.func.__name__, self.loss.train))
self.output.f -= self.model.bias
self.model.bias = -eeop(numpy.asarray(self.output.f).flatten(), numpy.asarray(self.input.y).flatten())
self.output.f += self.model.bias
self.output.y = numpy.sign(self.output.f)
if 'p' in self.output._fields: self.output.p = logistic(self.output.f)
self.update_loss()
if self.verbosity:
c,classes = confuse(self.input.y, self.output.y); err = 1.0 - c.diagonal() / c.sum(axis=1)
print("after rebias: bias = %g; train/CV err on %+d = %.3f; train/CV err on %+d = %.3f, train/CV %s = %.3f" % (self.model.bias, classes[0], err[0], classes[1], err[1], self.loss.func.__name__, self.loss.train))
return self
def plotf(self, condition=None, field='output.f'):
"""
Plot values (by default self.output.f) separately in red for
cases where self.input.y > 0 and in blue for cases where
self.input.y < 0.
<condition> may be 'istrain' or 'istest' to further limit which
data are viewed, or None (for everything)
"""###
neg = (self.input.y<0).flat
pos = (self.input.y>0).flat
if condition == None:
neg = numpy.array(neg)
pos = numpy.array(pos)
else:
neg = self.input[condition] * neg
pos = self.input[condition] * pos
if field in self.output._fields: f = self.output._getfield(field)
else: f = self._getfield(field)
from . import Plotting
Plotting.plot(f[neg], hold=False, marker='*', color=(0,0,1), drawnow=False)
Plotting.plot(f[pos], hold=True, marker='+', color=(1,0,0), grid=True)
def plotd(self, hold=False, drawnow=True):
"""
Plot two-dimensional data, along with decision surface of trained classifier.
"""###
xtr = self.input.x[self.input.istrain]
xts = self.input.x[self.input.istest]
ytr = self.input.y[self.input.istrain]
yts = self.input.y[self.input.istest]
dmin = numpy.asarray(self.input.x).min(axis=0)
dmax = numpy.asarray(self.input.x).max(axis=0)
expand = (dmax - dmin) * 0.05
dmin -= expand; dmax += expand
res = 70
c = self.copy()
tr = c.input.istrain
c.input.x = c.input.x[tr]
c.input.y = c.input.y[tr]
if c.input.K != None: c.input.K = c.input.K[tr,:][:,tr] # note: c.input.K[tr,tr] would do elementwise fancy indexing, not extract the submatrix
c.input.istest = c.input.istest[tr]
c.input.istrain = c.input.istrain[tr]
x = numpy.linspace(dmin[0], dmax[0], res, endpoint=True)
y = numpy.linspace(dmin[1], dmax[1], res, endpoint=True)
xx,yy = numpy.meshgrid(x,y)
c.test(x=numpy.c_[xx.flat,yy.flat])
z = numpy.asarray(c.output.f[c.input.istest])
z.shape = xx.shape
h = {}
from . import Plotting
pylab = Plotting.load_pylab()
ax = pylab.gca()
if not hold: ax.cla()
#h['surf'] = pylab.pcolor(x,y,z)
h['surf'] = Plotting.imagesc(x=x,y=y,img=z, interpolation='bilinear', cmap=pylab.cm.gray)
h['contours'] = pylab.contour(x,y,z, linestyles=['--'], colors=[(0,1.0,0)], hold='on')
h['boundary'] = pylab.contour(x,y,z, (0,), colors=[(0,1.0,0)], linewidths=3, hold='on')
for hh in list(h.values()): hh.set_clim(numpy.array([-1,+1]) * max(abs(numpy.array([z.min(), z.max()]))))
if len(ytr): h['tr+'] = pylab.plot(xtr[ytr.flat>0][:,0], xtr[ytr.flat>0][:,1], mec=(0.2,0.0,0.0), mfc=(1.0,0.0,0.0), marker='s', linestyle='none', markersize=7)
if len(ytr): h['tr-'] = pylab.plot(xtr[ytr.flat<0][:,0], xtr[ytr.flat<0][:,1], mec=(0.0,0.0,0.2), mfc=(0.0,0.0,1.0), marker='s', linestyle='none', markersize=7)
if len(yts): h['ts+'] = pylab.plot(xts[yts.flat>0][:,0], xts[yts.flat>0][:,1], mec=(0.2,0.0,0.0), mfc=(1.0,0.5,0.5), marker='o', linestyle='none', markersize=7)
if len(yts): h['ts-'] = pylab.plot(xts[yts.flat<0][:,0], xts[yts.flat<0][:,1], mec=(0.0,0.0,0.2), mfc=(0.5,0.5,1.0), marker='o', linestyle='none', markersize=7)
ax.set(xlim=[dmin[0],dmax[0]], ylim=[dmin[1],dmax[1]])
if drawnow: pylab.draw()
return h
def plotw(self, xtrain=None, normaxis=None, norm=2, x=None, y=None, **kwargs):
"""
Plot weights of a linear predictor (supply the training data in <xtrain> if
this was a kernel implementation of a linear predictor). Weights may be
formatted in a 1- or 2-dimensional array (depending on how the data exemplars
were formatted): a stem plot is used for 1-D and an image for 2-D.
Dimensionality may be reduced by 1 by taking the L-<norm> norm along axis
<normaxis>. Optional arguments <x> and <y> supply the x- and (for image plots)
y-axis data for plotting.
"""###
w = self.featureweight(x=xtrain)
if normaxis != None: w = (w ** norm).sum(axis=normaxis) ** (1.0/norm)
kwargs['aspect'] = kwargs.get('aspect', 'auto')
kwargs['balance'] = kwargs.get('balance', {None:0.0}.get(normaxis, None))
kwargs['grid'] = kwargs.get('grid', True)
if len(w.shape) == 2:
kwargs['colorbartitle'] = kwargs.get('colorbartitle', 'weight')
kwargs['colorbarformat'] = kwargs.get('colorbarformat', '%+g')
from . import Plotting; pylab = Plotting.load_pylab()
out = Plotting.imagesc(w, x=x, y=y, **kwargs)
elif len(w.shape) == 1:
if x == None: x = list(range(len(w)))
from . import Plotting; pylab = Plotting.load_pylab()
out = Plotting.stem(x, w, **kwargs)
else:
raise ValueError('do not know how to plot %d-dimensional weight array' % len(w.shape))
return out
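# Illustrative call (assuming a trained linear predictor c with a channels-by-time
# weight array): c.plotw(normaxis=1, norm=2) collapses the time axis by taking the
# L2 norm, leaving one stem per channel instead of an image.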
def plotopt(cc, hold=False, drawnow=True, **kwargs):
if not isinstance(cc, (list, tuple)): cc = [cc]
expt = cc[0].hyper
lab = [expt._shortdesc(cond) for cond in expt]
inner = sum([numpy.asarray(cci.cv.inner.mean) for cci in cc]) / len(cc)
innerse = sum([numpy.asarray(cci.cv.inner.se) for cci in cc]) / len(cc) # plot the average error-bar size across outer folds, rather than just making the bars unrepresentatively small (we don't actually have that many data points!)
outer = [{
'mean': numpy.asarray(cci.cv.outer.mean),
'ste': numpy.asarray(cci.cv.outer.se),
'n': sum(cci.input.istest),
} for cci in cc]
outer = seqste(*outer)
outer,outerse = outer['mean'],outer['ste']
n = inner.size
from . import Plotting
pylab = Plotting.load_pylab()
if not hold: pylab.cla()
kwargs['mec'] = kwargs.get('mec', 'auto')
kwargs['mfc'] = kwargs.get('mfc', 'auto')
kwargs['markersize'] = kwargs.get('markersize', 10)
explicitmarker = 'marker' in kwargs
if not explicitmarker: kwargs['marker'] = 'o'
hinner = pylab.errorbar(x=1.0+numpy.arange(n), y=inner, yerr=innerse, **kwargs)
if not explicitmarker: kwargs['marker'] = 's'
houter = pylab.errorbar(x=1.0+numpy.arange(n), y=outer, yerr=outerse, **kwargs)
ax = pylab.gca()
ax.set_xlim((0,n+1))
ax.set_xticks(1.0 + numpy.arange(n))
ax.set_xticklabels(lab, rotation=20, horizontalalignment='right')
ax.set_yticks(numpy.arange(0.0,1.1,0.1))
ax.set_ylim([0.0, 1.0])
ax.grid(True)
if drawnow: pylab.draw()
class klr2class(predictor):
def __init__(self, C=0.1, relcost=(1.0,1.0), lossfunc=class_loss, lossfield='y'):
predictor.__init__(self, lossfunc=lossfunc, lossfield=lossfield)
self.hyper.C = C
self.hyper.relcost = relcost
self.model.alpha = None
self.model.bias = None
self.output.y = None
self.output.f = None
self.output.p = None
def defaults(self): return {'C':[10.0, 3.0, 1.0, 0.3, 0.1, 0.03, 0.01, 0.003, 0.001]}
def training(self, K, y):
if len(K.shape) != 2 or K.shape[0] != K.shape[1]: raise ValueError('K must be a square matrix')
if y.size != K.shape[0]: raise ValueError('y must contain one label per row of K')
relcost = self.hyper.relcost
if relcost == None: relcost = 1.0
if relcost == 'balance': relcost = float(sum(y<0.0)) / float(sum(y>0.0))
err = 'hyper.relcost must be a scalar, a two-element sequence, or the string "balance"'
if isinstance(relcost, str): raise ValueError(err)
relcost = numpy.asarray(relcost).flatten()
if relcost.size == 1: relcost = relcost ** [-0.5, 0.5]
if relcost.size != 2: raise ValueError(err)
if relcost[0]==relcost[1]: relcost = None
varK = K.diagonal().mean() - K.mean()
C = self.hyper.C
C = numpy.asarray(C).flatten()
if C.size != 1: raise ValueError('hyper.C must be a scalar')
C = C[0] * varK
from . import klr
ab,f,J,obj = klr.klr_cg(K=K, Y=y, C=C, wght=relcost, verb=-1)
self.model.alpha = ab[:-1]
self.model.bias = ab[-1].flat[0]
def testing(self, K):
if K.shape[1] != len(self.model.alpha): raise ValueError('wrong number of training points (columns) in test/train kernel')
self.output.f = numpy.asmatrix(K) * self.model.alpha + self.model.bias
self.output.y = numpy.sign(self.output.f)
self.output.y[self.output.y==0] = 1
self.output.p = 1.0 / (1.0 + numpy.exp(-self.output.f))
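# Minimal usage sketch for klr2class (X, Y and Xnew are placeholder variables; linkern
# is assumed to build a linear kernel, as used by symwhitenkern() later in this file):
#
# c = klr2class(C=1.0)
# c.train(K=linkern(x=X), y=Y) # Y in {-1, +1}
# c.test(K=linkern(x=Xnew, x2=X)) # kernel between new points and training points
#
# Alternatively c.varyhyper() installs the default grid over C from defaults(), after
# which train()/cvtrain() perform the inner grid search automatically.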
class lda2class(predictor):
def __init__(self, shrinkage='optimal', lossfunc=class_loss, lossfield='y'):
predictor.__init__(self, lossfunc=lossfunc, lossfield=lossfield)
self.hyper.shrinkage = shrinkage
self.model.weights = None
self.model.bias = None
self.output.y = None
self.output.f = None
self.output.p = None
def defaults(self): return {'shrinkage':['optimal', 0.0, 0.001, 0.005, 0.05, 0.5, 0.8, 1.0]}
def training(self, x, y):
n = {}; m = {}; c = {}; z = {}
y = numpy.sign(y.flat)
for i,yi in enumerate(y):
n[yi] = n.get(yi, 0) + 1
m[yi] = m.get(yi, 0.0) + x[i]
for k,v in list(m.items()): v /= n[k]
for i,yi in enumerate(y):
xi = x[i] - m[yi]
zi = numpy.outer(xi.flat, xi.flat)
c[yi] = c.get(yi, 0.0) + zi
if self.hyper.shrinkage == 'optimal':
for k,v in list(c.items()): v /= n[k] # mean of centered per-exemplar outer products (biased ML estimator of cov)
for i,yi in enumerate(y):
xi = x[i] - m[yi]
zi = numpy.outer(xi.flat, xi.flat)
z[yi] = z.get(yi, 0.0) + ((zi - c[yi]).flatten() ** 2).sum()
for k,v in list(c.items()): v *= float(n[k]) / float(n[k]-1) # correct to unbiased estimate
for k in z: z[k] = z[k] * float(n[k]) / float(n[k] - 1) ** 3.0 # divide by (n-1) for unbiased estimate of z variances, then multiply by n/(n-1)^2
else:
for k,v in list(c.items()): v /= n[k]-1 # straight to unbiased estimate
dm = (m[+1] - m[-1])
for k,cov in list(c.items()):
gamma = self.hyper.shrinkage
nu = cov.diagonal().mean()
if gamma == 'optimal':
denom = (cov.flatten()**2).sum() - 2.0 * nu * cov.trace() - cov.shape[0] * nu ** 2.0
gamma = z[k] / denom
shrinkcov(cov, gamma=gamma, nu=nu)
cov = (c[+1] + c[-1]) / 2.0
w = numpy.linalg.solve(cov, dm.flatten())
w.shape = dm.shape
f = numpy.zeros((len(x),),dtype=numpy.float64)
for i,xi in enumerate(x): f[i] = numpy.inner(w.flat,xi.flat)
self.model.weights = w
self.model.bias = -eeop(f,y)
def testing(self, x):
w = self.model.weights.flat
self.output.f = numpy.asarray([numpy.inner(w,xi.flat) for xi in x]) + self.model.bias
self.output.y = numpy.sign(self.output.f)
self.output.p = logistic(self.output.f)
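# Minimal usage sketch for lda2class (X and Y are placeholders): shrinkage regularizes
# the pooled covariance towards a sphere via shrinkcov(); 'optimal' picks gamma
# analytically from the data.
#
# c = lda2class(shrinkage='optimal')
# c.train(x=X, y=Y) # X: exemplars-by-features, Y in {-1, +1}
#
# or, to grid-search the default shrinkage values by cross-validation:
#
# c = lda2class().varyhyper()
# c.cvtrain(x=X, y=Y, folds=10)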
class FoldingError(Exception): pass
class foldguide(object):
randomseed = None # global for all foldguide objects
@classmethod
def next_randomseed(cls):
limit = int(2**31-1)
if foldguide.randomseed == None:
saved_state = numpy.random.get_state()
numpy.random.seed() # randomizes from random-number generator and/or clock...
foldguide.randomseed = numpy.random.randint(limit) # ...but doesn't return the seed it found: so let's use the first number it gets that way
numpy.random.set_state(saved_state)
r = foldguide.randomseed
foldguide.randomseed = (foldguide.randomseed + 1) % limit
return r
def __init__(self, ids=None, labels=None, folds=None, ntrain=None, ntest=None, balance=True, randomseed='auto'):
"""
foldguide constructor parameters:
ids: a list of exemplar ids; shortcut: pass integer n if the ids should be just range(n)
labels: a list of labels, or None if balancing folds doesn't matter (or label information is unavailable)
folds: integer number of folds, or 'LOO' for leave-one-out; could also be a list of lists of test-fold ids for explicit()
ntrain: the size of each training fold, or None for auto
ntest: the size of each test fold, or None for auto
balance: whether to ensure that each fold contains (approximately) the same relative proportions of classes
randomseed: an integer (32-bit), or 'auto'
"""###
#sstruct.__init__(self)
if ids == None:
if labels == None: raise ValueError("either ids or labels must be supplied")
ids = len(labels)
if randomseed == 'auto': randomseed = foldguide.next_randomseed()
if isinstance(ids, foldguide): self.__dict__.update(ids.__dict__); return
if isinstance(ids, int): ids = list(range(ids))
self.n = len(ids)
self.ids = list(ids)
if labels == None: self.labels = None
elif len(labels) != self.n: raise ValueError("mismatched number of ids and labels")
else: self.labels = list(labels)
explicit_testfolds = None
if isinstance(folds, (tuple, list)):
explicit_testfolds = folds
folds = len(folds)
if ntrain != None or ntest != None: raise ValueError("cannot specify ntrain or ntest when folds are supplied as an explicit list")
if ntrain != None and ntest != None: raise ValueError("specify either ntrain or ntest, or neither, but not both")
if ntrain != None and ntrain > self.n / 2.0: ntrain,ntest = None, self.n - ntrain
if ntest != None and ntest > self.n / 2.0: ntrain,ntest = self.n - ntest, None
swap = False
if ntrain != None: foldsize = float(ntrain); swap = True
elif ntest != None: foldsize = float(ntest)
else:
if folds == None: folds = min(len(ids),10)
if isinstance(folds, str) and folds.lower() in ['loo', 'leave one out']: folds = len(ids)
foldsize = float(self.n) / folds
if folds == None: folds = int(round( numpy.ceil(self.n / float(foldsize)) ))
self.balanced = balance
order = list(range(self.n))
self.randomseed = randomseed
if self.randomseed != None:
saved_state = numpy.random.get_state()
numpy.random.seed(self.randomseed)
#print "shuffling with", self.randomseed
numpy.random.shuffle(order)
numpy.random.set_state(saved_state)
self.classes = []
categorized = []
saved_order = list(order)
i = len(order)
while len(order):
wrap = i >= len(order)
if wrap: i = 0
if labels == None: thislabel = None
else: thislabel = labels[order[i]]
if wrap:
matchlabel = thislabel
self.classes.append(matchlabel)
categorized.append([])
if isequal(matchlabel, thislabel):
categorized[-1].append(order.pop(i))
else:
i += 1
if not balance: categorized = [saved_order]
self.indices = []
distributed = [None for i in range(folds)]
riffled = [];
for cl in range(len(categorized)):
c = categorized[cl]
n = len(c)
k = numpy.linspace(0.0, 1.0, n, endpoint=True)
riffled += list(zip(k, c))
riffled = numpy.array([ind for x,ind in sorted(riffled)])
foldstart = numpy.linspace(0.0, self.n-foldsize, folds, endpoint=True)
for ifold in range(folds):
start,stop = int(round(foldstart[ifold])), int(round(foldstart[ifold] + foldsize))
distributed[ifold] = riffled[start:stop]
a = set(range(self.n))
self.indices = tuple([(
tuple(sorted(a - set(d))),
tuple(sorted(d)),
) for d in distributed])
if swap: self.indices = tuple([(b,a) for a,b in self.indices])
try: self.classes.sort()
except: pass
if explicit_testfolds != None:
self.explicit(test = explicit_testfolds)
def __repr__(self):
s = "<%s.%s instance at 0x%08X>" % (self.__class__.__module__,self.__class__.__name__,id(self))
trmean,tsmean = numpy.mean([ (len(tr),len(ts)) for tr,ts in self.indices], axis=0)
nfolds = len(self.indices)
describe_as_balanced = self.balanced and len(self.classes) > 1
describe_as_balanced = {True:' balanced ', False:' '}[describe_as_balanced]
s += "\n %d%sfolds each ~ %d:%d" % (nfolds, describe_as_balanced, round(trmean), round(tsmean))
s += ", randomseed = %s" % str(self.randomseed)
over = self.check()
s += "\n avg overlap between folds = %.3g%% : %.3g%%" % (100.0 * over['tr']['average'], 100.0 * over['ts']['average'])
ijust = len('%d' % (nfolds - 1))
trlabels,tslabels = [],[]
for i in range(nfolds):
tr,ts = self.indices[i]
trlabels.append('(' + '/'.join(['%d' % len([ind for ind in tr if self.labels == None or isequal(self.labels[ind], c)]) for c in self.classes]) + ')')
tslabels.append('(' + '/'.join(['%d' % len([ind for ind in ts if self.labels == None or isequal(self.labels[ind], c)]) for c in self.classes]) + ')')
trljust = max([len(x) for x in trlabels])
for i in range(nfolds):
s += "\n fold %s --- %s:%s" % ( ('%d'%i).rjust(ijust), trlabels[i].rjust(trljust), tslabels[i] )
return s
def __getitem__(self, i):
return self.get(i, 'ids')
def __len__(self):
return len(self.indices)
def get(self, fold, attr='ids'):
lookup = getattr(self, attr)
return (
tuple([lookup[x] for x in self.indices[fold][0]]),
tuple([lookup[x] for x in self.indices[fold][1]]),
)
def check(self):
nfolds = len(self.indices)
for i in range(nfolds):
tr,ts = self.indices[i]
overlap = sorted(set(tr).intersection(ts))
union = sorted(set(tr).union(ts))
if len(tr) == 0: raise FoldingError("fold %d of foldguide 0x%08x is corrupt: training fold is empty" % (i, id(self),))
if len(ts) == 0: raise FoldingError("fold %d of foldguide 0x%08x is corrupt: test fold is empty" % (i, id(self),))
if len(overlap): raise FoldingError("fold %d of foldguide 0x%08x is corrupt: overlap of %d items between training and test fold" % (i, id(self), len(overlap),))
if len(union) < self.n: raise FoldingError("fold %d of foldguide 0x%08x is corrupt: %d items are missing from both training and test fold" % (i, id(self), self.n-len(union)))
if len(union) > self.n: raise FoldingError("fold %d of foldguide 0x%08x is corrupt: %d extra unexpected items" % (i, id(self), len(union)-self.n))
tr,ts = list(zip(*self.indices))
tr = sorted(set(reduce(tuple.__add__, tr)))
if len(tr) < self.n: raise FoldingError("foldguide 0x%08x is corrupt: %d items never appear in the training folds" % (id(self), self.n - len(tr)))
if len(tr) > self.n: raise FoldingError("foldguide 0x%08x is corrupt: %d extra unexpected items appear in the training folds" % (id(self), len(tr) - self.n))
ts = sorted(set(reduce(tuple.__add__, ts)))
if len(ts) < self.n: raise FoldingError("foldguide 0x%08x is corrupt: %d items never appear in the test folds" % (id(self), self.n - len(ts)))
if len(ts) > self.n: raise FoldingError("foldguide 0x%08x is corrupt: %d extra unexpected items appear in the test folds" % (id(self), len(ts) - self.n))
overlap_tr = {}
overlap_ts = {}
for j in range(nfolds):
for i in range(j):
indi = set(self.indices[i][0])
indj = set(self.indices[j][0])
overlap_tr[(i,j)] = 2.0 * float(len(indi.intersection(indj))) / (len(indi) + len(indj))
indi = set(self.indices[i][1])
indj = set(self.indices[j][1])
overlap_ts[(i,j)] = 2.0 * float(len(indi.intersection(indj))) / (len(indi) + len(indj))
overlap_tr['average'] = numpy.mean(list(overlap_tr.values()))
overlap_ts['average'] = numpy.mean(list(overlap_ts.values()))
return {'tr':overlap_tr, 'ts':overlap_ts}
def explicit(self, training=None, test=None):
"""
Supply, as either <training> or <test> but not both, a list of lists of ids.
The ids will be re-folded explicitly in the specified way.
"""###
if training == None and test == None: raise ValueError("must supply either training or test")
if training != None and test != None: raise ValueError("must supply either training or test, but not both")
if training != None: folded = training
else: folded = test
nfolds = len(folded)
unmatched = reduce(list.__add__, [[str(x) for x in foldids if x not in self.ids] for foldids in folded])
if len(unmatched): raise ValueError("ids not found: %s" % ','.join(unmatched))
result = []
allind = set(range(len(self.ids)))
for foldids in folded:
specified = [self.ids.index(x) for x in foldids]
rest = allind - set(specified)
if training != None: result.append((tuple(specified), tuple(rest)))
else: result.append((tuple(rest), tuple(specified)))
oldindices = self.indices
self.indices = tuple(result)
try:
self.check()
except FoldingError as e:
self.indices = oldindices
raise FoldingError(str(e).split(':')[-1])
self.randomseed = 'explicit'
self.balanced = False # TODO: maybe could do better at inferring this
class experiment(sstruct):
"""
An sstruct subclass, hence inheriting (versions of) the
_setfield, _getfield, _allfields and _allitems methods.
May have fields and subfields. Values at the leaves of the
tree are forced, when assigned, to be lists (if they are
not already lists, they become one-item lists).
Iteration over the object iterates over combinations of
the leaf elements (conditions of the experiment), returning
sstruct objects.
Example:
expt = {'a.x': 'hello', 'a.y':[1,2,3], 'b':['foo', 'bar']}
expt = experiment(expt)
for i, condition in enumerate(expt): print('condition', i); print(condition)
Additional methods (over and above inherited sstruct methods)
include _shape(), _reshape() and _shortdesc()
Conditions can be dereferenced with a single serial index expt[i]
or with multi-dimensional subscripts expt[p,q,r,...]
expt._order is a special property which dictates the order in
which conditions come out when dereferenced with the serial index.
It is the same as numpy's array order: the default, _order='C',
means that the last-listed subfield varies fastest, whereas
the alternative _order='F' would mean that the first-listed
subfield varies fastest.
"""###
def __init__(self, _baseobj=None, _order='C', **kwargs):
self._inherit(_baseobj, _recursive=True, **kwargs)
self._order = _order
def __setattr__(self, f, v):
if f == '_order' and v not in ('C', 'F'): raise ValueError("_order must be 'C' or 'F'")
sstruct.__setattr__(self, f, v)
if f not in self._fields: return
if isinstance(v, list): v = list(v)
elif isinstance(v, sstruct): v = self.__class__(v)
else: v = [v]
self.__dict__[f] = v
def __len__(self):
n = 1
for f,v in self._allitems(): n *= len(v)
return n
def _shape(self):
"""
Return a tuple containing the number of levels in each subfield.
"""###
return [len(v) for f,v in self._allitems()]
def _reshape(self, x):
"""
Use numpy.reshape() to reshape a sequence x (perhaps a list of results
from each experimental condition?) into an array the same "shape" as the
experiment as given by _shape().
"""###
return numpy.reshape(x, self._shape(), order=self._order)
def _shortdesc(self, x, delim=', '):
"""
Describe condition (or other sstruct) <x> in one line, in terms of how
it differs from the experiment <self>. Constants (i.e. fields of the
experiment that have exactly one level) are not mentioned.
"""###
if not isinstance(x, sstruct): x = sstruct(x)
def shortstr(v):
cand1 = str(v)
if len(cand1) > 10 and hasattr(v, '__name__') and type(v).__name__.endswith(('function', 'method')): return v.__name__
if isinstance(v, numpy.ndarray) and len(v.shape)>1: cand2 = '[%s %s]' % (v.__class__.__name__, 'x'.join([str(d) for d in v.shape]))
elif isinstance(v, (tuple,list,numpy.ndarray)): cand2 = '[%s of %s item%s]' % (v.__class__.__name__, len(v), {1:''}.get(len(v),'s'))
elif isinstance(v, str): cand2 = '[string length %d]' % len(v)
else: cand2 = '[...]'
if len(cand1) > 10 and len(cand2) < len(cand1): return cand2
return cand1
terms = [('%s=%s' % (k,shortstr(v))) for k,v in x._allitems() if len(self._getfield(k,[0,1]))>1]
return delim.join(terms)
def _ind2sub(self, ind): # TODO: optionally make this C-order instead of F-order
shape = self._shape()
if self._order == 'C': shape = shape[::-1]
s,n = [],1
for sh in shape: s.append(n); n *= sh
if not -n <= ind < n: raise IndexError("index out of range")
for i in range(len(s)-1, -1, -1):
tmp = ind % s[i]
s[i] = (ind - tmp) // s[i]
ind = tmp
if self._order == 'C': s = s[::-1]
return s
def _sub2ind(self, sub):
shape = self._shape()
if self._order == 'C':
sub = list(sub)[::-1]
shape = shape[::-1]
if len(sub) != len(shape): raise IndexError("expected %d subscripts, got %d" % (len(shape),len(sub)))
s,n = [],1
for sh in shape: s.append(n); n *= sh
ind,sub = 0,list(sub)
while len(sub): ind += s.pop(0) * sub.pop(0)
return ind
def __getitem__(self, ind):
if isinstance(ind, tuple):
e = sstruct()
s = list(ind)
slicing = True in [isinstance(x, slice) for x in s]
if False in [isinstance(x, (int,slice)) for x in s]: raise IndexError("invalid index type")
if slicing: e = self.__class__(e)
elif isinstance(ind, int):
e = sstruct()
s = self._ind2sub(ind)
elif isinstance(ind, str):
return self._getfield(ind)
else:
raise IndexError("invalid index type")
items = self._allitems()
if len(s) != len(items): raise IndexError("expected %d subscripts, got %d" % (len(items),len(s)))
for f,v in items: e._setfield(f, v[s.pop(0)])
return e
def __iadd__(self, other):
cl = self.__class__
if not isinstance(other, cl): other = cl(other)
absent = []
for f,ov in other._allitems():
sv = self._getfield(f, absent)
if id(sv) == id(absent):
sv = list(ov)
self._setfield(f, sv)
elif f not in self._allfields():
raise ValueError("cannot extend subfield '%s'" % f)
for ovi in ov:
for svi in sv:
if isequal(ovi, svi): break
else:
sv.append(ovi)
return self
def __add__(self, other):
a = copy.deepcopy(self)
a += other
return a
def overlapping(nsamples=None, windowlength=None, nwindows=None, overlap=None):
if nsamples != None:
nsamples = int(nsamples)
if nsamples < 1: raise ValueError("illegal number of samples %d" % nsamples)
if windowlength != None:
windowlength = int(windowlength)
if windowlength < 1: raise ValueError("illegal window length %d" % windowlength)
if nwindows != None:
nwindows = int(nwindows)
if nwindows < 1: raise ValueError("illegal number of windows %d" % nwindows)
if overlap != None:
overlap = float(overlap)
if not 0.0 <= overlap < 1.0: raise ValueError("illegal overlap value %g" % overlap)
original = {'nsamples':nsamples, 'windowlength':windowlength, 'nwindows':nwindows, 'overlap':overlap}
nnones = sum([v==None for k,v in list(original.items())])
if nnones > 1: raise ValueError("insufficient information")
def ceil(x): return int(x) + int(x > int(x))
if nsamples == None:
nsamples = int(round( windowlength + (nwindows - 1.0) * (1.0 - overlap) * windowlength ))
if windowlength == None:
windowlength = int(round( nsamples / (nwindows + overlap - nwindows * overlap) ))
if nwindows == None:
nwindows = int(round( float(nsamples - windowlength * overlap) / (windowlength * (1.0 - overlap)) ))
if nwindows == 0: nwindows = 1
if nwindows == 1: overlap_samples, windowlength = 0,nsamples
else: overlap_samples = ceil(float(windowlength * nwindows - nsamples) / (nwindows - 1.0))
overlap = overlap_samples / float(windowlength)
new_nsamples = windowlength * (nwindows + overlap - nwindows * overlap)
new_nsamples = int(round(new_nsamples)) # rounding should only be necessary due to numerical precision here
skipfront = ceil((nsamples-new_nsamples)/2.0)
step = windowlength - overlap_samples
if step < 1: raise ValueError("nsamples=%d is too small for %d windows of length %d" % (nsamples,nwindows,windowlength))
maxstart = nsamples - windowlength
t0 = list(range(skipfront, maxstart+1, step))
while len(t0) > nwindows: t0.pop()
final = {'nsamples':nsamples, 'windowlength':windowlength, 'nwindows':nwindows, 'overlap':overlap}
for k,v in list(original.items()):
if v == None: continue
fv = final[k]
if k == 'overlap':
v = round(v * windowlength)
fv = round(fv * windowlength)
if abs(v-fv) > 1: raise ValueError('%s=%g is not consistent with other inputs (should be %d/%d = %g)' % (k,original[k],fv,windowlength,fv/windowlength))
elif v != fv:
raise ValueError("%s=%g is not consistent with other inputs (should be %d)" % (k,v,fv))
return t0,final
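# Worked example (values follow from the formulas above): overlapping(nsamples=1000,
# windowlength=200, overlap=0.5) yields nwindows=9 and start offsets
# t0 = [0, 100, 200, ..., 800], i.e. 200-sample windows stepping by 100 samples.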
def spcov(x, y=None, balance=True, spdim=1, return_trchvar=False):
"""
From data <x>, compute a spatial covariance matrix, where
"space" is the dimension of <x> denoted by <spdim> (cannot
be 0).
If labels <y> are supplied and <balance> is set to True,
the computation is balanced across classes: covariance
matrices are computed separately for each class and
averaged at the end.
"""###
n = {}; c = {}
axes = None
if spdim < 1: raise ValueError("spdim cannot be <1 (exemplar dim is assumed to be 0, so that x[i] is exemplar i)")
ntr, nch = x.shape[0],x.shape[spdim]
if return_trchvar: trchvar = numpy.zeros((ntr,nch), x.dtype)
spdim -= 1 # since we will be operating on each x[i]
for i,xi in enumerate(x):
denom = 1.0
xim = xi = numpy.asarray(xi)
if axes == None: axes = [axis for axis in range(len(xi.shape)) if axis != spdim]
for axis in axes:
xim = numpy.expand_dims(xim.mean(axis=axis), axis)
denom *= xi.shape[axis]
xi = xi - xim
ci = numpy.tensordot(xi, xi, axes=(axes,axes))
if y == None or not balance: yi = 0
else: yi = y[i]
c[yi] = c.get(yi, 0.0) + ci
n[yi] = n.get(yi, 0) + denom
if return_trchvar: trchvar[i,:].flat = ci.diagonal().flat
if balance:
for k,v in list(c.items()): v /= float(n[k])
c = sum(c.values()) / float(len(n))
else:
c = sum(c.values()) / float(sum(n.values()))
c = numpy.asmatrix(c)
if return_trchvar: return c, trchvar
else: return c
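# Illustrative call (x and labels are placeholders): for x of shape
# (trials, channels, time),
#
# C = spcov(x, y=labels, balance=True, spdim=1)
#
# returns a channels-by-channels covariance matrix, averaging the per-class estimates
# so that an unbalanced class ratio does not dominate the result.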
def shrinkcov(cov, gamma, nu='mean', copy=False):
"""
Shrink a covariance matrix <cov> towards a sphere.
<gamma> is the degree of shrinkage (0.0 for no change,
1.0 for complete shrinkage to a sphere). <nu> is the
variance of the sphere: if nu='mean', then use the
mean of the diagonal elements of <cov> on input.
For copy=False, modify <cov> in place and return it.
For copy=True, operate on and return a copy of <cov>.
"""###
if not 0.0 <= gamma <= 1.0: raise ValueError("illegal shrinkage value")
if not isinstance(cov, numpy.ndarray): cov = numpy.array(cov, dtype=numpy.float64)
if nu == 'mean': nu = cov.diagonal().mean()
elif nu == 'diag': nu = cov.diagonal()
if isinstance(nu, (list,tuple,numpy.ndarray)): nu = numpy.asarray(nu, dtype=cov.dtype).flatten()
else: nu = float(nu)
if copy: cov = cov * (1.0 - gamma)
elif gamma: cov *= 1.0 - gamma
if gamma: cov.flat[::cov.shape[1]+1] += gamma * nu
return cov
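# In matrix form the update is cov_new = (1 - gamma) * cov + gamma * nu * I; with the
# default nu='mean' the trace (total variance) is preserved. For example:
#
# C = shrinkcov(spcov(x), gamma=0.1) # shrink 10% of the way towards a sphere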
def spfilt(x, W, copy=False):
"""
Each x[i] is a space-by-time signal array. W is a spatial filtering
matrix with one filter per *row* (so each W[i] is a filter).
For copy=False, modify x in place and return it (only possible if
W is square). For copy=True, operate on and return a new array.
"""###
for i,xi in enumerate(x):
Wxi = numpy.dot(W, xi)
if i == 0:
xout = x
if copy: xout = numpy.zeros((len(x),)+Wxi.shape, dtype=x.dtype)
elif xout[0].shape != Wxi.shape: raise ValueError("in-place spatial-filtering only works with square W: otherwise must call with copy=True")
xout[i].flat = Wxi.flat
return xout
def symwhiten(x, cov=None, gamma=0.0, copy=False, **kwargs):
"""
If <cov> is supplied, use that, otherwise estimate a spatial
covariance matrix by calling spcov() on the data <x>, passing
through any other keyword args supplied to that.
First, if gamma > 0.0, obtain a shrunken copy of <cov> by
calling shrinkcov() with that <gamma> setting.
Take the (symmetrical) matrix-square-root of the inverse of
the resulting (shrunken, or not) covariance matrix, and use
that to spatially filter the data with spfilt().
Return (x,W) where x is the spatially filtered data and W is
the matrix of spatial filters (one per row).
"""###
if cov == None:
cov = spcov(x=x, **kwargs)
cov = shrinkcov(cov, gamma=gamma, copy=False)
else:
cov = shrinkcov(cov, gamma=gamma, copy=True)
W = svd(cov).isqrtm
x = spfilt(x, W, copy=copy)
return x,W
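# Illustrative pipeline (x is a placeholder for trial data): symmetric whitening
#
# xw, W = symwhiten(x, gamma=0.05, copy=True)
#
# computes W as the inverse matrix-square-root of the (shrunken) spatial covariance,
# so the filtered data xw has approximately identity spatial covariance.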
def symwhitenkern(x, x2=None, gamma=0.0, cov=None):
if x2 == None:
x,W = symwhiten(x, copy=True, cov=cov, gamma=gamma)
else:
x2,W = symwhiten(x2, copy=True, cov=cov, gamma=gamma)
x = spfilt(x, W, copy=True)
return linkern(x=x, x2=x2)
def stfac(Gp, Ps=None, Pt=None, maxrank=numpy.inf):
"""
Gp: S x T weights in preconditioned space
Ps: S x S spatial preconditioner (e.g. whitener), default = eye(S)
Pt: T x T temporal preconditioner, default = eye(T)
"""###
Gp = numpy.asmatrix(Gp)
S,T = Gp.shape
if Ps == None: Ps = numpy.eye(S, dtype=numpy.float64)
if Pt == None: Pt = numpy.eye(T, dtype=numpy.float64)
u = sstruct()
u.Ps = numpy.asmatrix(Ps)
u.Pt = numpy.asmatrix(Pt)
u.G = u.Ps * Gp * u.Pt.H
u.Gp = Gp
decomp = svd(u.Gp)
u.Rs, u.Rt = decomp.U,decomp.V
u.sv = decomp.s
nfac = sum(u.sv/(max(S,T)*max(u.sv)) > 1e-8)
nfac = min(nfac, maxrank)
u.Rs = u.Rs[:, :nfac]
u.Rt = u.Rt[:, :nfac]
u = stfac_filters_and_patterns(u, D=numpy.diag(u.sv[:nfac]), S=numpy.eye(nfac, dtype=numpy.float64))
return u
def stfac_filters_and_patterns(u, D=None, S=None, B=None, Ss=None, St=None):
dt = numpy.float64
nfac = u.Rs.shape[1]
I = numpy.eye(nfac, dtype=dt)
if D == None: D = numpy.asmatrix(u._getfield('D', I.copy()), dtype=dt)
if S == None: S = numpy.asmatrix(u._getfield('S', I.copy()), dtype=dt)
if B == None: B = numpy.asmatrix(u._getfield('B', I.copy()), dtype=dt)
if Ss == None: Ss = numpy.asmatrix(u._getfield('Ss', I.copy()), dtype=dt)
if St == None: St = numpy.asmatrix(u._getfield('St', I.copy()), dtype=dt)
if not isequal(D, numpy.diag(numpy.diag(D ))): raise ValueError('D must be a diagonal matrix')
if not isequal(S, numpy.diag(numpy.diag(S ))): raise ValueError('S must be a diagonal matrix')
if not isequal(Ss, numpy.diag(numpy.diag(Ss))): raise ValueError('Ss must be a diagonal matrix')
if not isequal(St, numpy.diag(numpy.diag(St))): raise ValueError('St must be a diagonal matrix')
u.S = numpy.asmatrix(S, dtype=dt)
u.B = numpy.asmatrix(B, dtype=dt)
u.Ss = numpy.asmatrix(Ss, dtype=dt)
u.St = numpy.asmatrix(St, dtype=dt)
u_B_I = u.B.I
u_S_I = u.S.I
u_Ps_I = u.Ps.I
u_Pt_I = u.Pt.I
u.Ws = u.Ps * u.Rs * u.S * u.B.H * u.Ss
u.Wt = u.Pt * u.Rt * u.S * u_B_I * u.St
u.As = u_Ps_I.H * u.Rs * u_S_I * u_B_I * u.Ss.I
u.At = u_Pt_I.H * u.Rt * u_S_I * u.B.H * u.St.I
u.D = numpy.asmatrix(D)
u.d = numpy.diag(u.D.A)
u.H = u.As * u.D * u.At.H
u.Q = u.As * u.D.I * u.At.H
u.G = u.Ws * u.D * u.Wt.H # should already be the case, unless (for example) rank was explicitly reduced since Gp and G were computed
u.Gp = u_Ps_I * u.G * u_Pt_I.H
return u
def correlate(x, y, axis=0):
x = numpy.asarray(x, dtype=float).view()
y = numpy.asarray(y, dtype=float).view()
x.shape = list(x.shape) + [1] * (len(y.shape) - len(x.shape))
y.shape = list(y.shape) + [1] * (len(x.shape) - len(y.shape))
xm = numpy.expand_dims( x.mean(axis=axis), axis )
ym = numpy.expand_dims( y.mean(axis=axis), axis )
xs = numpy.expand_dims( x.std(axis=axis), axis )
ys = numpy.expand_dims( y.std(axis=axis), axis )
x = x - xm
y = y - ym
x = x / xs
y = y / ys
return (x * y).mean(axis=axis)
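# Illustrative call: correlate(a, b, axis=0) returns the Pearson correlation between
# a and b along axis 0 (one coefficient per column for 2-D inputs), since both inputs
# are z-scored along that axis before their product is averaged.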
def correlation_pvalue(r, n, two_tailed=True ):
r = numpy.asarray( r, float )
n = numpy.asarray( n, float )
dof = n - 2.0
t = r / ( ( 1.0 - r ** 2.0 ) / dof ) ** 0.5
import scipy.stats
p = scipy.stats.t( dof ).cdf( t )
positive = ( r >= 0.0 ).astype( float )
p = positive + ( 1.0 - 2.0 * positive ) * p
if two_tailed: p *= 2.0
return p | PypiClean |
/BobBuildTool-0.23.1.tar.gz/BobBuildTool-0.23.1/pym/bob/intermediate.py |
import asyncio
import hashlib
import os.path
import struct
from abc import ABC, abstractmethod
from .input import DigestHasher
from .languages import getLanguage, ScriptLanguage
from .scm import getScm, ScmOverride
from .state import BobState
from .utils import asHexStr, getPlatformTag
# Dump granularity of the intermediate representation:
# Fully dumped: Package-/Build-/Checkout-Step of the built package
# Partially dumped: everything else; a partially dumped object keeps only:
# packageStep: variantId, workspacePath, package, isRelocatable, sandbox
# package: packageStep, recipe, name
# sandbox: None/!None
class AbstractIR(ABC):
@abstractmethod
def mungeStep(self, step):
return step
@abstractmethod
def mungePackage(self, package):
return package
@abstractmethod
def mungeRecipe(self, recipe):
return recipe
@abstractmethod
def mungeSandbox(self, sandbox):
return sandbox
@abstractmethod
def mungeTool(self, tool):
return tool
@abstractmethod
def mungeRecipeSet(self, recipeSet):
return recipeSet
class StepIR(AbstractIR):
@classmethod
def fromStep(cls, step, graph, partial=False):
self = cls()
self.__data = {}
self.__data['partial'] = partial
self.__data['variantId'] = step.getVariantId().hex()
self.__data['package'] = graph.addPackage(step.getPackage(), partial)
self.__data['valid'] = step.isValid()
self.__data['workspacePath'] = step.getWorkspacePath()
self.__data['isCheckoutStep'] = step.isCheckoutStep()
self.__data['isBuildStep'] = step.isBuildStep()
self.__data['isPackageStep'] = step.isPackageStep()
self.__data['isRelocatable'] = step.isRelocatable()
self.__data['isShared'] = step.isShared()
self.__data['sandbox'] = (
graph.addSandbox(step.getSandbox(False)),
graph.addSandbox(step.getSandbox(True))
)
if not partial:
self.__data['isFingerprinted'] = step._isFingerprinted()
self.__data['digestScript'] = step.getDigestScript()
self.__data['tools'] = { name : graph.addTool(tool) for name, tool in step.getTools().items() }
self.__data['arguments'] = [ graph.addStep(a, a.getPackage() != step.getPackage()) for a in step.getArguments() ]
self.__data['allDepSteps'] = (
[ graph.addStep(a, a.getPackage() != step.getPackage()) for a in step.getAllDepSteps(False) ],
[ graph.addStep(a, a.getPackage() != step.getPackage()) for a in step.getAllDepSteps(True) ]
)
self.__data['env'] = step.getEnv()
if self.JENKINS:
self.__data['preRunCmds'] = step.getJenkinsPreRunCmds()
else:
self.__data['preRunCmds'] = step.getPreRunCmds()
self.__data['postRunCmds'] = step.getPostRunCmds()
self.__data['setupScript'] = step.getSetupScript()
self.__data['mainScript'] = step.getMainScript()
self.__data['updateScript'] = step.getUpdateScript()
self.__data['fingerprintScript'] = step._getFingerprintScript()
self.__data['jobServer'] = step.jobServer()
self.__data['label'] = step.getLabel()
self.__data['isDeterministic'] = step.isDeterministic()
self.__data['isUpdateDeterministic'] = step.isUpdateDeterministic()
self.__data['hasNetAccess'] = step.hasNetAccess()
if self.__data['isCheckoutStep']:
self.__data['hasLiveBuildId'] = step.hasLiveBuildId()
self.__data['scmList'] = [
(s.getProperties(self.JENKINS), [ o.__getstate__() for o in s.getActiveOverrides()])
for s in step.getScmList()
]
self.__data['scmDirectories'] = { d : (h.hex(), p) for (d, (h, p)) in step.getScmDirectories().items() }
self.__data['sandboxVariantId'] = step._getSandboxVariantId().hex()
self.__data['toolKeysWeak'] = sorted(step._coreStep._getToolKeysWeak())
self.__data['digestEnv'] = step._coreStep.digestEnv
return self
@classmethod
def fromData(cls, data):
self = cls()
self.__data = data
return self
def toData(self):
return self.__data
def __hash__(self):
return hash(self.__data['variantId'])
def __lt__(self, other):
return self.getVariantId() < other.getVariantId()
def __le__(self, other):
return self.getVariantId() <= other.getVariantId()
def __eq__(self, other):
return self.getVariantId() == other.getVariantId()
def __ne__(self, other):
return self.getVariantId() != other.getVariantId()
def __gt__(self, other):
return self.getVariantId() > other.getVariantId()
def __ge__(self, other):
return self.getVariantId() >= other.getVariantId()
@property
def partial(self):
return self.__data['partial']
def getPackage(self):
return self.mungePackage(self.__data['package'])
def isValid(self):
return self.__data['valid']
def isShared(self):
return self.__data['isShared']
def getWorkspacePath(self):
return self.__data['workspacePath']
def getExecPath(self, referrer=None):
"""Return the execution path of the step.
The execution path is where the step is actually run. It may be distinct
from the workspace path if the build is performed in a sandbox. The
``referrer`` is an optional parameter that represents a step that refers
to this step while building.
"""
if self.isValid():
if (referrer or self).getSandbox() is None:
return self.getStoragePath()
else:
return os.path.join("/bob", asHexStr(self.getVariantId()), "workspace")
else:
return "/invalid/exec/path/of/{}".format(self.getPackage().getName())
def getStoragePath(self):
"""Return the storage path of the step.
The storage path is where the files of the step are stored. For
checkout and build steps this is always the workspace path. But package
steps can be shared globally and thus the directory may lie outside of
the project directory. The storage path may also change between
invocations if the shared location changes.
"""
if self.isPackageStep() and self.isShared():
return BobState().getStoragePath(self.getWorkspacePath())
else:
return self.getWorkspacePath()
def getSandbox(self, forceSandbox=False):
return self.mungeSandbox(self.__data['sandbox'][1 if forceSandbox else 0])
def getVariantId(self):
return bytes.fromhex(self.__data['variantId'])
def isCheckoutStep(self):
return self.__data['isCheckoutStep']
def isBuildStep(self):
return self.__data['isBuildStep']
def isPackageStep(self):
return self.__data['isPackageStep']
def _isFingerprinted(self):
return self.__data['isFingerprinted']
def isRelocatable(self):
return self.__data['isRelocatable']
def getDigestScript(self):
return self.__data['digestScript']
def getTools(self):
return { name : self.mungeTool(tool) for name, tool in self.__data['tools'].items() }
def getArguments(self):
return [ self.mungeStep(arg) for arg in self.__data['arguments'] ]
def getAllDepSteps(self, forceSandbox=False):
return [ self.mungeStep(dep) for dep in
self.__data['allDepSteps'][1 if forceSandbox else 0] ]
def getEnv(self):
return self.__data['env']
def getPaths(self):
# FIXME: rename to getToolPaths
"""Get sorted list of execution paths to used tools.
The returned list is intended to be passed as PATH environment variable.
The paths are sorted by name.
"""
return sorted([ os.path.join(tool.getStep().getExecPath(self), tool.getPath())
for tool in self.getTools().values() ])
def getLibraryPaths(self):
"""Get sorted list of library paths of used tools.
The returned list is intended to be passed as LD_LIBRARY_PATH environment
variable. The paths are first sorted by tool name. The order of paths of
a single tool is kept.
"""
paths = []
for (name, tool) in sorted(self.getTools().items()):
paths.extend([ os.path.join(tool.getStep().getExecPath(self), l) for l in tool.getLibs() ])
return paths
def getPreRunCmds(self):
assert not self.JENKINS
return self.__data['preRunCmds']
def getJenkinsPreRunCmds(self):
assert self.JENKINS
return self.__data['preRunCmds']
def getPostRunCmds(self):
return self.__data['postRunCmds']
def getSetupScript(self):
return self.__data['setupScript']
def getMainScript(self):
return self.__data['mainScript']
def getUpdateScript(self):
return self.__data['updateScript']
def _getFingerprintScript(self):
return self.__data['fingerprintScript']
def jobServer(self):
return self.__data['jobServer']
def getLabel(self):
return self.__data['label']
def isDeterministic(self):
return self.__data['isDeterministic']
def isUpdateDeterministic(self):
return self.__data['isUpdateDeterministic']
def hasLiveBuildId(self):
return self.__data['hasLiveBuildId']
def hasNetAccess(self):
return self.__data['hasNetAccess']
def getScmList(self):
recipeSet = self.getPackage().getRecipe().getRecipeSet()
def deserialize(state):
ret = ScmOverride.__new__(ScmOverride)
ret.__setstate__(state)
return ret
return [ getScm(scm, [deserialize(o) for o in overrides], recipeSet)
for scm, overrides in self.__data['scmList'] ]
def getScmDirectories(self):
return { d : (bytes.fromhex(h), p) for (d, (h, p)) in self.__data['scmDirectories'].items() }
def mayUpdate(self, inputChanged, oldHash, rehash):
if any((s.isLocal() and not s.isDeterministic()) for s in self.getScmList()):
return True
if not self.getUpdateScript():
return False
if not self.isUpdateDeterministic() or inputChanged:
return True
return rehash() != oldHash
def _getSandboxVariantId(self):
return bytes.fromhex(self.__data['sandboxVariantId'])
async def getDigestCoro(self, calculate, forceSandbox=False, hasher=DigestHasher,
fingerprint=None, platform=b'', relaxTools=False):
h = hasher()
h.update(platform)
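        # The digest is assembled from: the platform tag, an optional fingerprint
        # or sandbox digest, the sandbox paths, the digest script, all tools
        # (recipe digest or name, plus path and libs), the digest environment and
        # the recipe digests of all valid arguments.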
if self._isFingerprinted() and self.getSandbox() \
and not self.getPackage().getRecipe().getRecipeSet().sandboxFingerprints:
[d] = await calculate([self.getSandbox().getStep()])
h.fingerprint(hasher.sliceRecipes(d))
elif fingerprint:
h.fingerprint(fingerprint)
sandbox = not self.getPackage().getRecipe().getRecipeSet().sandboxInvariant and \
self.getSandbox(forceSandbox)
if sandbox:
[d] = await calculate([sandbox.getStep()])
h.update(hasher.sliceRecipes(d))
h.update(struct.pack("<I", len(sandbox.getPaths())))
for p in sandbox.getPaths():
h.update(struct.pack("<I", len(p)))
h.update(p.encode('utf8'))
else:
h.update(b'\x00' * 20)
script = self.getDigestScript()
if script:
h.update(struct.pack("<I", len(script)))
h.update(script.encode("utf8"))
else:
h.update(b'\x00\x00\x00\x00')
tools = self.getTools()
weakTools = set(self.__data['toolKeysWeak']) if relaxTools else []
h.update(struct.pack("<I", len(tools)))
tools = sorted(tools.items(), key=lambda t: t[0])
toolsDigests = await calculate([ tool.getStep() for name,tool in tools ])
for ((name, tool), d) in zip(tools, toolsDigests):
if name in weakTools:
h.update(name.encode('utf8'))
else:
h.update(hasher.sliceRecipes(d))
h.update(struct.pack("<II", len(tool.getPath()), len(tool.getLibs())))
h.update(tool.getPath().encode("utf8"))
for l in tool.getLibs():
h.update(struct.pack("<I", len(l)))
h.update(l.encode('utf8'))
h.update(struct.pack("<I", len(self.__data['digestEnv'])))
for (key, val) in sorted(self.__data['digestEnv'].items()):
h.update(struct.pack("<II", len(key), len(val)))
h.update((key+val).encode('utf8'))
args = [ a for a in self.getArguments() if a.isValid() ]
argsDigests = await calculate(args)
h.update(struct.pack("<I", len(args)))
for d in argsDigests:
h.update(hasher.sliceRecipes(d))
h.fingerprint(hasher.sliceHost(d))
return h.digest()
async def predictLiveBuildId(self):
"""Query server to predict live build-id.
Returns the live-build-id or None if an SCM query failed.
"""
if not self.hasLiveBuildId():
return None
h = hashlib.sha1()
h.update(getPlatformTag())
h.update(self._getSandboxVariantId())
for s in self.getScmList():
liveBId = await s.predictLiveBuildId(self)
if liveBId is None: return None
h.update(liveBId)
return h.digest()
def calcLiveBuildId(self):
"""Calculate live build-id from workspace."""
if not self.hasLiveBuildId():
return None
workspacePath = self.getWorkspacePath()
h = hashlib.sha1()
h.update(getPlatformTag())
h.update(self._getSandboxVariantId())
for s in self.getScmList():
liveBId = s.calcLiveBuildId(workspacePath)
if liveBId is None: return None
h.update(liveBId)
return h.digest()
def getUpdateScriptDigest(self):
"""Return a digest that tracks relevant changes to the update script behaviour"""
h = hashlib.sha1()
script = self.getUpdateScript()
if script:
h.update(struct.pack("<I", len(script)))
h.update(script.encode("utf8"))
else:
h.update(b'\x00\x00\x00\x00')
h.update(struct.pack("<I", len(self.__data['digestEnv'])))
for (key, val) in sorted(self.__data['digestEnv'].items()):
h.update(struct.pack("<II", len(key), len(val)))
h.update((key+val).encode('utf8'))
return h.digest()
class PackageIR(AbstractIR):
@classmethod
def fromPackage(cls, package, graph, partial=False):
self = cls()
self.__data = {}
self.__data['partial'] = partial
self.__data['stack'] = package.getStack()
self.__data['recipe'] = graph.addRecipe(package.getRecipe())
self.__data['name'] = package.getName()
self.__data['packageStep'] = graph.addStep(package.getPackageStep(), partial)
self.__data['metaEnv'] = package.getMetaEnv()
if not partial:
self.__data['buildStep'] = graph.addStep(package.getBuildStep(), False)
self.__data['checkoutStep'] = graph.addStep(package.getCheckoutStep(), False)
return self
@classmethod
def fromData(cls, data):
self = cls()
self.__data = data
return self
def toData(self):
return self.__data
def __eq__(self, other):
return isinstance(other, PackageIR) and (self.__data['stack'] == other.__data['stack'])
@property
def partial(self):
        return self.__data['partial']
def getRecipe(self):
return self.mungeRecipe(self.__data['recipe'])
def getCheckoutStep(self):
return self.mungeStep(self.__data['checkoutStep'])
def getBuildStep(self):
return self.mungeStep(self.__data['buildStep'])
def getPackageStep(self):
return self.mungeStep(self.__data['packageStep'])
def getStack(self):
return self.__data['stack']
def getName(self):
return self.__data['name']
def getMetaEnv(self):
return self.__data['metaEnv']
class SandboxIR(AbstractIR):
@classmethod
def fromSandbox(cls, sandbox, graph):
self = cls()
self.__data = {}
self.__data['step'] = graph.addStep(sandbox.getStep(), True)
self.__data['paths'] = sandbox.getPaths()
self.__data['mounts'] = sandbox.getMounts()
return self
@classmethod
def fromData(cls, data):
self = cls()
self.__data = data
return self
def toData(self):
return self.__data
def getStep(self):
return self.mungeStep(self.__data['step'])
def getPaths(self):
return self.__data['paths']
def getMounts(self):
return self.__data['mounts']
class ToolIR(AbstractIR):
@classmethod
def fromTool(cls, tool, graph):
self = cls()
self.__data = {}
self.__data['step'] = graph.addStep(tool.getStep(), True)
self.__data['path'] = tool.getPath()
self.__data['libs'] = tool.getLibs()
return self
@classmethod
def fromData(cls, data):
self = cls()
self.__data = data
return self
def toData(self):
return self.__data
def getStep(self):
return self.mungeStep(self.__data['step'])
def getPath(self):
return self.__data['path']
def getLibs(self):
return self.__data['libs']
class RecipeIR(AbstractIR):
@classmethod
def fromRecipe(cls, recipe, graph):
self = cls()
self.__data = {}
self.__data['recipeSet'] = graph.addRecipeSet(recipe.getRecipeSet())
self.__data['scriptLanguage'] = recipe.scriptLanguage.index.value
self.__data['name'] = recipe.getName()
self.__data['layer'] = recipe.getLayer()
return self
@classmethod
def fromData(cls, data):
self = cls()
self.__data = data
return self
def toData(self):
return self.__data
def getRecipeSet(self):
return self.mungeRecipeSet(self.__data['recipeSet'])
def getName(self):
return self.__data['name']
def getLayer(self):
return self.__data['layer']
@property
def scriptLanguage(self):
return getLanguage(ScriptLanguage(self.__data['scriptLanguage']))
class RecipeSetIR:
@classmethod
def fromRecipeSet(cls, recipeSet):
self = cls()
self.__data = {}
self.__data['sandboxInvariant'] = recipeSet.sandboxInvariant
self.__data['sandboxFingerprints'] = recipeSet.sandboxFingerprints
self.__data['policies'] = {
# FIXME: lazily query policies and only add them all in toData()
'allRelocatable' : recipeSet.getPolicy('allRelocatable'),
'pruneImportScm' : recipeSet.getPolicy('pruneImportScm'),
'scmIgnoreUser' : recipeSet.getPolicy('scmIgnoreUser'),
'secureSSL' : recipeSet.getPolicy('secureSSL'),
'tidyUrlScm' : recipeSet.getPolicy('tidyUrlScm'),
'sandboxFingerprints' : recipeSet.getPolicy('sandboxFingerprints'),
'gitCommitOnBranch' : recipeSet.getPolicy('gitCommitOnBranch'),
'fixImportScmVariant' : recipeSet.getPolicy('fixImportScmVariant'),
}
self.__data['archiveSpec'] = recipeSet.archiveSpec()
self.__data['envWhiteList'] = sorted(recipeSet.envWhiteList())
self.__data['projectRoot'] = recipeSet.getProjectRoot()
return self
@classmethod
def fromData(cls, data):
self = cls()
self.__data = data
return self
def toData(self):
return self.__data
@property
def sandboxInvariant(self):
return self.__data['sandboxInvariant']
@property
def sandboxFingerprints(self):
return self.__data['sandboxFingerprints']
def archiveSpec(self):
return self.__data['archiveSpec']
def envWhiteList(self):
return set(self.__data['envWhiteList'])
def getPolicy(self, name, location=None):
return self.__data['policies'][name]
def getProjectRoot(self):
return self.__data['projectRoot'] | PypiClean |
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/bower_components/paper-fab/.github/ISSUE_TEMPLATE.md | <!-- Instructions: https://github.com/PolymerElements/paper-fab/CONTRIBUTING.md#filing-issues -->
### Description
<!-- Example: The `paper-foo` element causes the page to turn pink when clicked. -->
### Expected outcome
<!-- Example: The page stays the same color. -->
### Actual outcome
<!-- Example: The page turns pink. -->
### Live Demo
<!-- Example: https://jsbin.com/cagaye/edit?html,output -->
### Steps to reproduce
<!-- Example
1. Put a `paper-foo` element in the page.
2. Open the page in a web browser.
3. Click the `paper-foo` element.
-->
### Browsers Affected
<!-- Check all that apply -->
- [ ] Chrome
- [ ] Firefox
- [ ] Safari 9
- [ ] Safari 8
- [ ] Safari 7
- [ ] Edge
- [ ] IE 11
- [ ] IE 10
| PypiClean |
/ExtProxy-1.0.3.tar.gz/ExtProxy-1.0.3/README.md | # ExtProxy
[](https://github.com/SeaHOH/extproxy/blob/master/LICENSE)
[](https://github.com/SeaHOH/extproxy/releases)
[](https://github.com/SeaHOH/extproxy)
ExtProxy extends urllib2's ProxyHandler to support extra proxy types: HTTPS and SOCKS. It gives users the same consistent experience as a plain HTTP proxy.
This script uses a side-effect-free monkey patch: it does not touch the built-in socket module, it only injects some code into the processing of the `Request`, `ProxyHandler`, `HTTPConnection` and `SSLContext` methods. You don't need to worry about the patching; you can keep using everything as before, and you can unpatch it at any time.
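For example, the patch can be toggled at runtime (a minimal sketch; `patch_items` and `restore_items` are the same functions referenced in the usage section below):
```py
import extproxy            # importing applies the patch via extproxy.patch_items()
extproxy.restore_items()   # undo the patch at any time
extproxy.patch_items()     # re-apply it later if needed
```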
# Installation
Install from
[](https://pypi.org/project/ExtProxy/)
[](https://pypi.org/project/ExtProxy/#files)
[](https://pypi.org/project/ExtProxy/#files)
pip install ExtProxy
Or download and install from source code
python setup.py install
# Compatibility
- Python >= 2.7
- Require PySocks to support SOCKS proxy type
# Usage
```py
# Target can be imported before monkey patching
from urllib.request import urlopen, build_opener, ProxyHandler
# Import extproxy, auto apply monkey patching by `extproxy.patch_items`
import extproxy
# Use origin HTTP proxy
proxy = "http://127.0.0.1:8080"
# Use HTTPS proxy; use `set_https_proxy` to customize the proxy's SSL verify mode
import ssl
proxy = "https://127.0.0.1:8443"
cafile = "cafile path"
set_https_proxy(proxy, check_hostname=False, cafile=cafile)
context_settings = {
"protocol": ssl.PROTOCOL_TLSv1_2,
"cert_reqs": ssl.CERT_REQUIRED, #
"check_hostname": True, #
"cafile": "cafile path", #
"capath": "cafiles dir path", #
"cadata": b"ca data" # Uesd to server auth
"certfile": "certfile path", #
"keyfile": "keyfile path", # Uesd to client auth
}
context = ssl._create_unverified_context(**context_settings)
... # More custom settings
set_https_proxy(proxy, context=context)
# Use SOCKS proxy, `socks` can be: socks, socks4, socks4a, socks5, socks5h
# SOCKS4 does not support remote resolving, but SOCKS4a/5 supported
# 'socks' means SOCKS5, 'socks5h' means do not use remote resolving
proxy = "socks://127.0.0.1:1080"
# Set proxy via system/python environment variables
import os
os.environ["HTTP_PROXY"] = proxy
os.environ["HTTPS_PROXY"] = proxy
print(urlopen("https://httpbin.org/ip").read().decode())
# Set proxy via ProxyHandler
opener = build_opener(ProxyHandler({
"http": proxy,
"https": proxy
}))
print(opener.open("https://httpbin.org/ip").read().decode())
# Restore the monkey patch; after this, HTTPS and SOCKS proxies can no longer be used
extproxy.restore_items()
```
# License
ExtProxy is released under the [MIT License](https://github.com/SeaHOH/extproxy/blob/master/LICENSE).
| PypiClean |
/MatchZoo-2.2.0.tar.gz/MatchZoo-2.2.0/matchzoo/contrib/models/bimpm.py |
from keras.models import Model
from keras.layers import Dense, Concatenate, Dropout
from keras.layers import Bidirectional, LSTM
from matchzoo.engine.param import Param
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.base_model import BaseModel
from matchzoo.contrib.layers import MultiPerspectiveLayer
class BiMPM(BaseModel):
"""
BiMPM.
Reference:
https://github.com/zhiguowang/BiMPM/blob/master/src/SentenceMatchModelGraph.py#L43-L186
Examples:
>>> import matchzoo as mz
>>> model = mz.contrib.models.BiMPM()
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls) -> ParamTable:
""":return: model default parameters."""
params = super().get_default_params(with_embedding=True)
params['optimizer'] = 'adam'
# params.add(Param('dim_word_embedding', 50))
# TODO(tjf): remove unused params in the final version
# params.add(Param('dim_char_embedding', 50))
# params.add(Param('word_embedding_mat'))
# params.add(Param('char_embedding_mat'))
# params.add(Param('embedding_random_scale', 0.2))
# params.add(Param('activation_embedding', 'softmax'))
# BiMPM Setting
params.add(Param('perspective', {'full': True,
'max-pooling': True,
'attentive': True,
'max-attentive': True}))
params.add(Param('mp_dim', 3))
params.add(Param('att_dim', 3))
params.add(Param('hidden_size', 4))
params.add(Param('dropout_rate', 0.0))
params.add(Param('w_initializer', 'glorot_uniform'))
params.add(Param('b_initializer', 'zeros'))
params.add(Param('activation_hidden', 'linear'))
params.add(Param('with_match_highway', False))
params.add(Param('with_aggregation_highway', False))
return params
def build(self):
"""Build model structure."""
# ~ Input Layer
input_left, input_right = self._make_inputs()
# Word Representation Layer
# TODO: concatenate word level embedding and character level embedding.
embedding = self._make_embedding_layer()
embed_left = embedding(input_left)
embed_right = embedding(input_right)
# L119-L121
# https://github.com/zhiguowang/BiMPM/blob/master/src/SentenceMatchModelGraph.py#L119-L121
embed_left = Dropout(self._params['dropout_rate'])(embed_left)
embed_right = Dropout(self._params['dropout_rate'])(embed_right)
# ~ Word Level Matching Layer
# Reference:
# https://github.com/zhiguowang/BiMPM/blob/master/src/match_utils.py#L207-L223
# TODO
pass
# ~ Encoding Layer
# Note: When merge_mode = None, output will be [forward, backward],
# The default merge_mode is concat, and the output will be [lstm].
# If with return_state, then the output would append [h,c,h,c].
bi_lstm = Bidirectional(
LSTM(self._params['hidden_size'],
return_sequences=True,
return_state=True,
dropout=self._params['dropout_rate'],
kernel_initializer=self._params['w_initializer'],
bias_initializer=self._params['b_initializer']),
merge_mode='concat')
# x_left = [lstm_lt, forward_h_lt, _, backward_h_lt, _ ]
x_left = bi_lstm(embed_left)
x_right = bi_lstm(embed_right)
# ~ Multi-Perspective Matching layer.
# Output is two sequence of vectors.
        # Caveat: multiple context layers are not supported yet
multi_perspective = MultiPerspectiveLayer(self._params['att_dim'],
self._params['mp_dim'],
self._params['perspective'])
# Note: input to `keras layer` must be list of tensors.
mp_left = multi_perspective(x_left + x_right)
mp_right = multi_perspective(x_right + x_left)
# ~ Dropout Layer
mp_left = Dropout(self._params['dropout_rate'])(mp_left)
mp_right = Dropout(self._params['dropout_rate'])(mp_right)
# ~ Highway Layer
# reference:
# https://github.com/zhiguowang/BiMPM/blob/master/src/match_utils.py#L289-L295
if self._params['with_match_highway']:
# the input is left matching representations (question / passage)
pass
# ~ Aggregation layer
# TODO: mask the above layer
aggregation = Bidirectional(
LSTM(self._params['hidden_size'],
return_sequences=False,
return_state=False,
dropout=self._params['dropout_rate'],
kernel_initializer=self._params['w_initializer'],
bias_initializer=self._params['b_initializer']),
merge_mode='concat')
rep_left = aggregation(mp_left)
rep_right = aggregation(mp_right)
# Concatenate the concatenated vector of left and right.
x = Concatenate()([rep_left, rep_right])
# ~ Highway Network
# reference:
# https://github.com/zhiguowang/BiMPM/blob/master/src/match_utils.py#L289-L295
if self._params['with_aggregation_highway']:
pass
# ~ Prediction layer.
# reference:
# https://github.com/zhiguowang/BiMPM/blob/master/src/SentenceMatchModelGraph.py#L140-L153
x = Dense(self._params['hidden_size'],
activation=self._params['activation_hidden'])(x)
x = Dense(self._params['hidden_size'],
activation=self._params['activation_hidden'])(x)
x_out = self._make_output_layer()(x)
self._backend = Model(inputs=[input_left, input_right],
outputs=x_out) | PypiClean |
/Data_mining_platform-1.2.tar.gz/Data_mining_platform-1.2/codes/FeatureSelect.py | from sklearn.model_selection import GridSearchCV,cross_val_score, ShuffleSplit
from sklearn.ensemble import RandomForestRegressor ,RandomForestClassifier
import numpy as np
import pandas as pd
from codes.Tools import *
from scipy.stats import pearsonr
from minepy import MINE
import dcor
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as Lda
from sklearn.decomposition import PCA
from sklearn.feature_selection import VarianceThreshold,SelectKBest,chi2,SelectFromModel,RFE
import warnings
warnings.filterwarnings('ignore')
class Filter():
def __init__(self,strategy,threshold_s,bestk):
        self.strategy = strategy #### 1: remove low variance, 2: chi-squared test, 3: Pearson correlation, 4: maximal information coefficient, 5: distance correlation, 6: model-based (regression), 7: model-based (classification)
self.threshold_s = threshold_s
self.bestk = bestk
def fit(self,df_s,cols):
df = df_s
if self.strategy == 1:
vc = VarianceThreshold(threshold=self.threshold_s)
vc.fit(df[cols])
n = len(cols)
self.delete_lst = []
for i in range(n):
if not vc.get_support()[i]:
if dcor.distance_correlation(df[cols[i]], df['y_label']) <0.05:
self.delete_lst.append(cols[i])
elif self.strategy == 2:
cc = SelectKBest(chi2,self.bestk)
cc.fit(df[cols],df['y_label'])
n = len(cols)
self.delete_lst = []
for i in range(n):
if not cc.get_support()[i]:
self.delete_lst.append(cols[i])
elif self.strategy == 3:
self.delete_lst = []
for col in cols :
                if abs(pearsonr(df[col], df['y_label'])[0]) < self.threshold_s:
self.delete_lst.append(col)
elif self.strategy == 4:
self.delete_lst = []
m = MINE()
for col in cols :
                m.compute_score(df[col], df['y_label'])
                if m.mic() < self.threshold_s:
                    self.delete_lst.append(col)
elif self.strategy == 5:
self.delete_lst = []
for col in cols :
                if dcor.distance_correlation(df[col], df['y_label']) < self.threshold_s:
self.delete_lst.append(col)
        elif self.strategy == 6: ##### model-based, regression
            self.delete_lst = []
            rf = RandomForestRegressor(n_estimators=20, max_depth=4)
            for col in cols:
                score = cross_val_score(rf, df[[col]], df['y_label'], scoring="r2", cv=ShuffleSplit(n_splits=3, test_size=.3))
                if score.mean() < self.threshold_s:
                    self.delete_lst.append(col)
        elif self.strategy == 7: ##### model-based, classification
            self.delete_lst = []
            rf = RandomForestClassifier(n_estimators=20, max_depth=4)
            for col in cols:
                score = cross_val_score(rf, df[[col]], df['y_label'], scoring="roc_auc", cv=ShuffleSplit(n_splits=3, test_size=.3))
                if score.mean() < self.threshold_s:
                    self.delete_lst.append(col)
else:
return {'status': 1, 'msg': 'Ineffective strategy', 'res': None}
return {'status': 0, 'msg': '', 'res': None}
def transform(self,df_s,cols):
df = df_s.copy()
df = df.loc[:,~df.columns.isin(self.delete_lst)]
return {'status': 0, 'msg': '', 'res': df}
    def save_model(self):
return {'status': 0, 'msg': '', 'res': self.delete_lst}
class Wrapper(): ############### 特征递归消除
def __init__(self,strategy,bestk):
        self.strategy = strategy ### 1: regression, 2: classification
self.bestk = bestk
def fit(self,df_s,cols):
df = df_s.copy()
if self.strategy == 1:
            rfe = RFE(estimator=RandomForestRegressor(n_estimators=100, max_features='sqrt'), n_features_to_select=self.bestk)
elif self.strategy == 2:
            rfe = RFE(estimator=RandomForestClassifier(n_estimators=100, max_features='sqrt'), n_features_to_select=self.bestk)
rfe.fit(df[cols],df['y_label'])
n = len(cols)
self.delete_lst = []
for i in range(n):
if not rfe.get_support()[i]:
self.delete_lst.append(cols[i])
return {'status': 0, 'msg': '', 'res': None}
def transform(self,df_s,cols):
df = df_s.copy()
df = df.loc[:,~df.columns.isin(self.delete_lst)]
return {'status': 0, 'msg': '', 'res': df}
def save_model(self):
return {'status': 0, 'msg': '', 'res': self.delete_lst}
class Embedded():
    def __init__(self, strategy, base_model, threshold_s): ## keep only the non-zero coefficients
        self.strategy = strategy ### 1: L1, 2: sparse matrix, 3: tree-based
        self.base_model = base_model ## e.g. 1: svc, 2: lasso, 3: lr
        #self.threshold_s = threshold_s
def fit(self,df_s,cols):
df = df_s.copy()
if self.strategy == 1:
##lsvc = LinearSVC(C=0.01, penalty="l1", dual=False)
## GradientBoostingClassifier()
### LogisticRegression(penalty="l1", C=0.1)
self.base_model.fit(df[cols].values, df['y_label'])
sfm = SelectFromModel(self.base_model, prefit=True)
n = len(cols)
self.delete_lst = []
for i in range(n):
if not sfm.get_support()[i]:
self.delete_lst.append(cols[i])
return {'status': 0, 'msg': '', 'res': None}
def transform(self,df_s,cols):
df = df_s.copy()
df = df.loc[:,~df.columns.isin(self.delete_lst)]
return {'status': 0, 'msg': '', 'res': df}
def save_model(self):
return {'status': 0, 'msg': '', 'res': self.delete_lst}
class DimensionalityReduction():
def __init__(self,strategy,components):
self.strategy = strategy ###1:pca, 2:lda
self.components = components
def fit(self,df_s,cols):
df = df_s.copy()
if self.strategy == 1:
self.dr = PCA(n_components=self.components)
self.dr.fit(df[cols])
elif self.strategy == 2:
self.dr = Lda(n_components=self.components)
self.dr.fit(df[cols], df['y_label'])
return {'status': 0, 'msg': '', 'res': None}
def transform(self,df_s,cols):
df = df_s.copy()
X = self.dr.transform(df[cols])
y = df['y_label']
res ={'X':X,'y':y}
return {'status': 0, 'msg': '', 'res': res}
def save_model(self):
return {'status': 0, 'msg': '', 'res': self.dr} | PypiClean |
/Euphorie-15.0.2.tar.gz/Euphorie-15.0.2/src/euphorie/client/resources/oira/script/chunks/17797.82a2f2183302fa63a7a8.min.js | "use strict";(self.webpackChunk_patternslib_patternslib=self.webpackChunk_patternslib_patternslib||[]).push([[17797],{51913:function(n,e,t){var o=t(87537),s=t.n(o),r=t(23645),l=t.n(r),a=t(61667),c=t.n(a),i=new URL(t(77794),t.b),u=l()(s()),h=c()(i);u.push([n.id,".hljs{display:block;overflow-x:auto;padding:.5em;color:#dccf8f;background:url("+h+") repeat scroll left top #181914}.hljs-comment,.hljs-quote{color:#586e75;font-style:italic}.hljs-keyword,.hljs-selector-tag,.hljs-literal,.hljs-addition{color:#b64926}.hljs-number,.hljs-string,.hljs-doctag,.hljs-regexp{color:#468966}.hljs-title,.hljs-section,.hljs-built_in,.hljs-name{color:#ffb03b}.hljs-variable,.hljs-template-variable,.hljs-class .hljs-title,.hljs-type,.hljs-tag{color:#b58900}.hljs-attribute{color:#b89859}.hljs-symbol,.hljs-bullet,.hljs-link,.hljs-subst,.hljs-meta{color:#cb4b16}.hljs-deletion{color:#dc322f}.hljs-selector-id,.hljs-selector-class{color:#d3a60c}.hljs-formula{background:#073642}.hljs-emphasis{font-style:italic}.hljs-strong{font-weight:bold}","",{version:3,sources:["webpack://./node_modules/highlight.js/styles/pojoaque.css"],names:[],mappings:"AAQA,MACE,aAAA,CACA,eAAA,CACA,YAAA,CACA,aAAA,CACA,iFAAA,CAGF,0BAEE,aAAA,CACA,iBAAA,CAGF,8DAIE,aAAA,CAGF,oDAIE,aAAA,CAGF,oDAIE,aAAA,CAGF,oFAKE,aAAA,CAGF,gBACE,aAAA,CAGF,4DAKE,aAAA,CAGF,eACE,aAAA,CAGF,uCAEE,aAAA,CAGF,cACE,kBAAA,CAGF,eACE,iBAAA,CAGF,aACE,gBAAA",sourcesContent:["/*\n\nPojoaque Style by Jason Tate\nhttp://web-cms-designs.com/ftopict-10-pojoaque-style-for-highlight-js-code-highlighter.html\nBased on Solarized Style from http://ethanschoonover.com/solarized\n\n*/\n\n.hljs {\n display: block;\n overflow-x: auto;\n padding: 0.5em;\n color: #dccf8f;\n background: url(./pojoaque.jpg) repeat scroll left top #181914;\n}\n\n.hljs-comment,\n.hljs-quote {\n color: #586e75;\n font-style: italic;\n}\n\n.hljs-keyword,\n.hljs-selector-tag,\n.hljs-literal,\n.hljs-addition {\n color: #b64926;\n}\n\n.hljs-number,\n.hljs-string,\n.hljs-doctag,\n.hljs-regexp {\n color: #468966;\n}\n\n.hljs-title,\n.hljs-section,\n.hljs-built_in,\n.hljs-name {\n color: #ffb03b;\n}\n\n.hljs-variable,\n.hljs-template-variable,\n.hljs-class .hljs-title,\n.hljs-type,\n.hljs-tag {\n color: #b58900;\n}\n\n.hljs-attribute {\n color: #b89859;\n}\n\n.hljs-symbol,\n.hljs-bullet,\n.hljs-link,\n.hljs-subst,\n.hljs-meta {\n color: #cb4b16;\n}\n\n.hljs-deletion {\n color: #dc322f;\n}\n\n.hljs-selector-id,\n.hljs-selector-class {\n color: #d3a60c;\n}\n\n.hljs-formula {\n background: #073642;\n}\n\n.hljs-emphasis {\n font-style: italic;\n}\n\n.hljs-strong {\n font-weight: bold;\n}\n"],sourceRoot:""}]),e.Z=u},23645:function(n){n.exports=function(n){var e=[];return e.toString=function(){return this.map((function(e){var t="",o=void 0!==e[5];return e[4]&&(t+="@supports (".concat(e[4],") {")),e[2]&&(t+="@media ".concat(e[2]," {")),o&&(t+="@layer".concat(e[5].length>0?" ".concat(e[5]):""," {")),t+=n(e),o&&(t+="}"),e[2]&&(t+="}"),e[4]&&(t+="}"),t})).join("")},e.i=function(n,t,o,s,r){"string"==typeof n&&(n=[[null,n,void 0]]);var l={};if(o)for(var a=0;a<this.length;a++){var c=this[a][0];null!=c&&(l[c]=!0)}for(var i=0;i<n.length;i++){var u=[].concat(n[i]);o&&l[u[0]]||(void 0!==r&&(void 0===u[5]||(u[1]="@layer".concat(u[5].length>0?" 
".concat(u[5]):""," {").concat(u[1],"}")),u[5]=r),t&&(u[2]?(u[1]="@media ".concat(u[2]," {").concat(u[1],"}"),u[2]=t):u[2]=t),s&&(u[4]?(u[1]="@supports (".concat(u[4],") {").concat(u[1],"}"),u[4]=s):u[4]="".concat(s)),e.push(u))}},e}},61667:function(n){n.exports=function(n,e){return e||(e={}),n?(n=String(n.__esModule?n.default:n),/^['"].*['"]$/.test(n)&&(n=n.slice(1,-1)),e.hash&&(n+=e.hash),/["'() \t\n]|(%20)/.test(n)||e.needQuotes?'"'.concat(n.replace(/"/g,'\\"').replace(/\n/g,"\\n"),'"'):n):n}},87537:function(n){n.exports=function(n){var e=n[1],t=n[3];if(!t)return e;if("function"==typeof btoa){var o=btoa(unescape(encodeURIComponent(JSON.stringify(t)))),s="sourceMappingURL=data:application/json;charset=utf-8;base64,".concat(o),r="/*# ".concat(s," */");return[e].concat([r]).join("\n")}return[e].join("\n")}},17797:function(n,e,t){t.r(e);var o=t(93379),s=t.n(o),r=t(7795),l=t.n(r),a=t(3565),c=t.n(a),i=t(19216),u=t.n(i),h=t(44589),p=t.n(h),f=t(51913),A={};A.styleTagTransform=p(),A.setAttributes=c(),A.insert=function(n){var e=document.head.querySelectorAll("*")[0];e?document.head.insertBefore(n,e):document.head.append(n)},A.domAPI=l(),A.insertStyleElement=u();s()(f.Z,A);e.default=f.Z&&f.Z.locals?f.Z.locals:void 0},93379:function(n){var e=[];function t(n){for(var t=-1,o=0;o<e.length;o++)if(e[o].identifier===n){t=o;break}return t}function o(n,o){for(var r={},l=[],a=0;a<n.length;a++){var c=n[a],i=o.base?c[0]+o.base:c[0],u=r[i]||0,h="".concat(i," ").concat(u);r[i]=u+1;var p=t(h),f={css:c[1],media:c[2],sourceMap:c[3],supports:c[4],layer:c[5]};if(-1!==p)e[p].references++,e[p].updater(f);else{var A=s(f,o);o.byIndex=a,e.splice(a,0,{identifier:h,updater:A,references:1})}l.push(h)}return l}function s(n,e){var t=e.domAPI(e);t.update(n);return function(e){if(e){if(e.css===n.css&&e.media===n.media&&e.sourceMap===n.sourceMap&&e.supports===n.supports&&e.layer===n.layer)return;t.update(n=e)}else t.remove()}}n.exports=function(n,s){var r=o(n=n||[],s=s||{});return function(n){n=n||[];for(var l=0;l<r.length;l++){var a=t(r[l]);e[a].references--}for(var c=o(n,s),i=0;i<r.length;i++){var u=t(r[i]);0===e[u].references&&(e[u].updater(),e.splice(u,1))}r=c}}},19216:function(n){n.exports=function(n){var e=document.createElement("style");return n.setAttributes(e,n.attributes),n.insert(e,n.options),e}},3565:function(n,e,t){n.exports=function(n){var e=t.nc;e&&n.setAttribute("nonce",e)}},7795:function(n){n.exports=function(n){if("undefined"==typeof document)return{update:function(){},remove:function(){}};var e=n.insertStyleElement(n);return{update:function(t){!function(n,e,t){var o="";t.supports&&(o+="@supports (".concat(t.supports,") {")),t.media&&(o+="@media ".concat(t.media," {"));var s=void 0!==t.layer;s&&(o+="@layer".concat(t.layer.length>0?" ".concat(t.layer):""," {")),o+=t.css,s&&(o+="}"),t.media&&(o+="}"),t.supports&&(o+="}");var r=t.sourceMap;r&&"undefined"!=typeof btoa&&(o+="\n/*# sourceMappingURL=data:application/json;base64,".concat(btoa(unescape(encodeURIComponent(JSON.stringify(r))))," */")),e.styleTagTransform(o,n,e.options)}(e,n,t)},remove:function(){!function(n){if(null===n.parentNode)return!1;n.parentNode.removeChild(n)}(e)}}}},44589:function(n){n.exports=function(n,e){if(e.styleSheet)e.styleSheet.cssText=n;else{for(;e.firstChild;)e.removeChild(e.firstChild);e.appendChild(document.createTextNode(n))}}},77794:function(n,e,t){n.exports=t.p+"456f618a6e4559de337b.jpg"}}]);
//# sourceMappingURL=17797.82a2f2183302fa63a7a8.min.js.map | PypiClean |
/Kreveik-0.6.0.tar.gz/Kreveik-0.6.0/kreveik/genetic/__init__.py | def score(element,scorer=None):
"""
If a scorer is specified, the score of the element calculated with that
scorer is returned.
If scorer is not set, then the element is scored with the scorer specified
in its definition.
"""
if scorer == None:
element.score = element.scorer(element)
else:
return scorer(element)
def genetic_iteration(ensemble,**kwargs):
'''
Runs one iteration of the genetic algorithm.
It finds wildtypes of the family, mutates them, populates the family with mutants
and assasinates as much of it has mutated.
'''
import logging
if ((ensemble.scorer == None) or (ensemble.selector == None)
or (ensemble.mutator == None) or (ensemble.killer == None)):
raise ValueError("An element needs its scorer, killer, selector and mutator \
defined in order to be fed into the GA")
return False
logging.info("GA: for ensemble "+str(ensemble)+" started.")
killcount = 0
newcomer_list = []
for element in ensemble:
try:
element.score = ensemble.scorer(element)
logging.info("GA: Scoring Network "+str(element))
except:
logging.error("GA: The scoring of the element failed.")
if ensemble.selector(element,**kwargs):
newcomer = element.copy()
logging.info("GA: Mutating Wildtype "+str(element))
ensemble.mutator(newcomer)
newcomer_list.append(newcomer)
killcount += 1
logging.info("GA: Adding Newcomers")
for individual in newcomer_list:
ensemble.add(individual)
logging.info("New network added, "+str(individual)+".")
individual.populate_equilibria()
individual.score = ensemble.scorer(individual)
logging.info("GA: Killing...")
ensemble.killer(ensemble,killcount)
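# Illustrative driver sketch (not part of the original module): an ensemble whose
# scorer, selector, mutator and killer are set, as required above, can be evolved
# by repeatedly calling genetic_iteration() until stop_iteration() reports
# convergence, e.g.:
#
#     scores = []
#     n = 0
#     while True:
#         genetic_iteration(ensemble)
#         scores.append(max(network.score for network in ensemble))
#         if stop_iteration(ensemble, n, scores=scores):
#             break
#         n += 1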
def stop_iteration(ensemble, number, **kwargs):
"""
"""
import numpy as num
import logging
if number>200:
return False
else:
if 'scores' in kwargs:
last_scores = kwargs['scores'][-100:]
logging.debug("Last scores are: "+str(last_scores))
previous_scores = kwargs['scores'][-200:-100]
logging.debug("Previous scores are: "+str(previous_scores))
else:
last_scores = [network.score for network in ensemble][-100:]
previous_scores = [network.score for network in ensemble][-200:-100]
difference = num.std(previous_scores) - num.mean(last_scores)
if abs(difference) < 0.001:
return True
else:
return False
__all__ = ["genetic_iteration", "score"] | PypiClean
/BitstampClient-2.2.10.tar.gz/BitstampClient-2.2.10/README.rst | .. image:: https://badge.fury.io/py/BitstampClient.svg
:target: https://badge.fury.io/py/BitstampClient
======================
bitstamp-python-client
======================
Python package to communicate with the bitstamp.net API (v1 and v2).
Compatible with Python 2.7+ and Python 3.3+
Overview
========
There are two classes. One for the public part of API and a second for the
trading part.
Public class doesn't need user credentials, because API commands which this
class implements are not bound to bitstamp user account.
Description of API: https://www.bitstamp.net/api/
Install
=======
Install from PyPi::
pip install BitstampClient
Install from git::
pip install git+git://github.com/kmadac/bitstamp-python-client.git
Usage
=====
Here's a quick example of usage::
>>> import bitstamp.client
>>> public_client = bitstamp.client.Public()
>>> print(public_client.ticker()['volume'])
8700.01208078
>>> trading_client = bitstamp.client.Trading(
... username='999999', key='xxx', secret='xxx')
>>> print(trading_client.account_balance()['fee'])
0.5000
>>> print(trading_client.ticker()['volume']) # Can access public methods
8700.01208078
How to activate a new API key
=============================
1. Login your Bitstamp account
2. Click on Security -> Api Access
3. Select permissions which you want to have for you access key (if you don't
check any box, you will get error message 'No permission found' after each
API call)
4. Click the 'Generate key' button and don't forget to write down your Secret!
5. Click 'Activate'
6. Goto your Inbox and click on link sent by Bitstamp to activate this API key
Class diagram
=============
.. image:: https://raw.github.com/kmadac/bitstamp-python-client/master/class_diagram.png
:alt: Class diagram
:align: center
| PypiClean |
/Apycula-0.9.0a1.tar.gz/Apycula-0.9.0a1/doc/sdram.md | # SDRAM
Gowin devices with the R suffix, such as the GW1NR-9, have built-in SDRAM.
This SDRAM is an off-the-shelf SDRAM die wire-bonded into the package (System-in-Package).
So there isn't much to fuzz; you just have to know the pinout and the model.
Gowin has been so kind as to provide LiteX with [the details](https://github.com/litex-hub/litex-boards/blob/8a33c2aa312dddc66297f7cd6e39107fda5a2efb/litex_boards/targets/trenz_tec0117.py#L92-L118) of the model and pinout. That is... the magic wire names that result in the vendor placing the IOB in the correct place.
For the open source tools, you can't use the magic wire names. But what you can do is feed the magic wire names to the vendor and look at the generated placement.
This is what has been done in `/legacy/sdram`, which is a standalone script not tied into the rest of Apicula.
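For consumers of that output, a small hypothetical helper like the one below could turn the `name -> location` report (the format of the listing that follows) into a Python dict for use in placement or constraint scripts; this is only an illustrative sketch and not part of Apicula itself.
```
# Hypothetical helper: parse lines like "IO_sdram_dq(0) -> R29C26_IOA"
# into a {signal name: location} dictionary.
def parse_pinout(report: str) -> dict:
    pinout = {}
    for line in report.strip().splitlines():
        name, _, location = line.partition("->")
        pinout[name.strip()] = location.strip()
    return pinout
```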
The result for GW1NR-9 is as below. A daring adventurer could use these to develop their own SDRAM controller or try to add support for LiteX on open source Gowin tools.
```
IO_sdram_dq(0) -> R29C26_IOA
IO_sdram_dq(1) -> R29C27_IOA
IO_sdram_dq(2) -> R29C35_IOA
IO_sdram_dq(3) -> R29C36_IOA
IO_sdram_dq(4) -> R29C37_IOA
IO_sdram_dq(5) -> R29C38_IOA
IO_sdram_dq(6) -> R29C39_IOA
IO_sdram_dq(7) -> R29C40_IOA
IO_sdram_dq(8) -> R29C16_IOB
IO_sdram_dq(9) -> R29C17_IOB
IO_sdram_dq(10) -> R29C18_IOA
IO_sdram_dq(11) -> R29C18_IOB
IO_sdram_dq(12) -> R29C19_IOB
IO_sdram_dq(13) -> R29C20_IOB
IO_sdram_dq(14) -> R29C21_IOB
IO_sdram_dq(15) -> R29C22_IOB
O_sdram_clk -> R1C4_IOB
O_sdram_cke -> R1C9_IOA
O_sdram_cs_n -> R1C35_IOB
O_sdram_cas_n -> R1C40_IOB
O_sdram_ras_n -> R1C40_IOA
O_sdram_wen_n -> R1C44_IOA
O_sdram_addr(0) -> R1C31_IOA
O_sdram_addr(1) -> R1C28_IOA
O_sdram_addr(2) -> R1C27_IOA
O_sdram_addr(3) -> R1C26_IOA
O_sdram_addr(4) -> R1C22_IOB
O_sdram_addr(5) -> R1C21_IOB
O_sdram_addr(6) -> R1C18_IOB
O_sdram_addr(7) -> R1C18_IOA
O_sdram_addr(8) -> R1C14_IOB
O_sdram_addr(9) -> R1C14_IOA
O_sdram_addr(10) -> R1C31_IOB
O_sdram_addr(11) -> R1C9_IOB
O_sdram_dqm(0) -> R1C44_IOB
O_sdram_dqm(1) -> R1C4_IOA
O_sdram_ba(0) -> R1C35_IOA
O_sdram_ba(1) -> R1C32_IOA
``` | PypiClean |
/K40Silence-0.0.1.tar.gz/K40Silence-0.0.1/src/core/svg_io.py | import os
from ..svgelements import (SVG, Group, Path, Shape, SVGImage, SVGText)
MILS_PER_MM = 39.3701
def plugin(kernel, lifecycle=None):
if lifecycle == "register":
kernel.register("load/SVGLoader", SVGLoader)
class SVGLoader:
@staticmethod
def load_types():
yield "Scalable Vector Graphics", ("svg",), "image/svg+xml"
@staticmethod
def load(context, elements_modifier, pathname, **kwargs):
bed_dim = context.get_context("bed")
bed_dim.setting(float, "bed_width", 325.0)
bed_dim.setting(float, "bed_height", 220.0)
if "svg_ppi" in kwargs:
ppi = float(kwargs["svg_ppi"])
else:
ppi = 96.0
if ppi == 0:
ppi = 96.0
scale_factor = 1000.0 / ppi
svg = SVG.parse(
source=pathname,
reify=False,
width="%fmm" % (bed_dim.bed_width),
height="%fmm" % (bed_dim.bed_height),
ppi=ppi,
color="none",
transform="scale(%f)" % scale_factor,
)
return SVGLoader.parse(
svg, elements_modifier, pathname
)
@staticmethod
def parse(svg, elements_modifier, pathname):
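        # Stroke colour selects the operation below: red shapes/paths are cut,
        # blue ones are engraved, and everything else (including images) is rastered.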
for element in svg:
try:
if element.values["visibility"] == "hidden":
continue
except KeyError:
pass
except AttributeError:
pass
# if isinstance(element, SVGText):
# if element.text is None:
# continue
# if element.stroke == "red":
# elements_modifier.cut_cutcode(element)
# elif element.stroke == "blue":
# elements_modifier.engrave_cutcode(element)
# else:
# elements_modifier.raster_cutcode(element)
if isinstance(element, Path):
if len(element) == 0:
continue
element.approximate_arcs_with_cubics()
if element.stroke == "red":
elements_modifier.cut_cutcode(abs(element))
elif element.stroke == "blue":
elements_modifier.engrave_cutcode(abs(element))
else:
elements_modifier.raster_cutcode(abs(element))
elif isinstance(element, Shape):
if not element.transform.is_identity():
# Shape Reification failed.
element = Path(element)
element.reify()
element.approximate_arcs_with_cubics()
if len(element) == 0:
continue # Degenerate.
else:
e = Path(element)
if len(e) == 0:
continue # Degenerate.
if element.stroke == "red":
elements_modifier.cut_cutcode(abs(Path(element)))
elif element.stroke == "blue":
elements_modifier.engrave_cutcode(abs(Path(element)))
else:
elements_modifier.raster_cutcode(abs(Path(element)))
elif isinstance(element, SVGImage):
try:
element.load(os.path.dirname(pathname))
if element.image is not None:
elements_modifier.raster_cutcode(abs(element))
except OSError:
pass
elif isinstance(element, SVG):
continue
elif isinstance(element, Group):
SVGLoader.parse(element, elements_modifier, pathname)
continue
return True | PypiClean |
/fangnao-0.1.0.tar.gz/FangNao-0.1.0/docs/apis/usage_of_inputfactory.py |
# # Usage of `input_factory` module
# +
import numpy as np
import matplotlib.pyplot as plt
import brainpy as nn
# -
# ## constant_current()
# `constant_current()` function helps you to format constant current in several periods.
#
# For example, if you want to get an input in which 0-100 ms is zero, 100-400 ms is value `1.`,
# and 400-500 ms is zero, then, you can define:
# +
current, duration = nn.input_factory.constant_current([(0, 100), (1, 300), (0, 100)], 0.1)
fig, gs = nn.visualize.get_figure(1, 1)
fig.add_subplot(gs[0, 0])
ts = np.arange(0, duration, 0.1)
plt.plot(ts, current)
plt.title('[(0, 100), (1, 300), (0, 100)]')
plt.show()
# -
# Another example is this:
# +
current, duration = nn.input_factory.constant_current([(-1, 10), (1, 3), (3, 30), (-0.5, 10)], 0.1)
fig, gs = nn.visualize.get_figure(1, 1)
fig.add_subplot(gs[0, 0])
ts = np.arange(0, duration, 0.1)
plt.plot(ts, current)
plt.title('[(-1, 10), (1, 3), (3, 30), (-0.5, 10)]')
plt.show()
# -
# ## spike_current()
# `spike_current()` function helps you to construct an input like a series of short-time spikes.
# +
points, length, size, duration, _dt = [10, 20, 30, 200, 300], 1., 0.5, 1000, 0.1
current = nn.input_factory.spike_current(points, length, size, duration, _dt)
fig, gs = nn.visualize.get_figure(1, 1)
fig.add_subplot(gs[0, 0])
ts = np.arange(0, duration, _dt)
plt.plot(ts, current)
plt.title(r'points=%s, duration=%d' % (points, duration))
plt.show()
# -
# In the above example, at 10 ms, 20 ms, 30 ms, 200 ms, 300 ms, the assumed neuron produces spikes. Each spike
# lasts 1 ms, and the spike current is 0.5.
# ## ramp_current()
# +
fig, gs = nn.visualize.get_figure(2, 1)
duration, _dt = 1000, 0.1
current = nn.input_factory.ramp_current(0, 1, duration)
ts = np.arange(0, duration, _dt)
fig.add_subplot(gs[0, 0])
plt.plot(ts, current)
plt.title(r'$c_{start}$=0, $c_{end}$=%d, duration, dt=%.1f, '
r'$t_{start}$=0, $t_{end}$=None' % (duration, _dt,))
duration, _dt, t_start, t_end = 1000, 0.1, 200, 800
current = nn.input_factory.ramp_current(0, 1, duration, t_start, t_end)
ts = np.arange(0, duration, _dt)
fig.add_subplot(gs[1, 0])
plt.plot(ts, current)
plt.title(r'$c_{start}$=0, $c_{end}$=1, duration=%d, dt=%.1f, '
r'$t_{start}$=%d, $t_{end}$=%d' % (duration, _dt, t_start, t_end))
plt.show() | PypiClean |
/360monitoringcli-1.0.19-py3-none-any.whl/cli360monitoring/lib/servernotifications.py |
import json
from prettytable import PrettyTable
from datetime import datetime
from .api import apiGet
from .config import Config
from .functions import printError, printWarn
class ServerNotifications(object):
def __init__(self, config: Config, format: str = 'table'):
self.config = config
self.format = format
self.notifications = None
self.table = PrettyTable(field_names=['Start', 'End', 'Status', 'Summary'])
self.table.align['Start'] = 'c'
self.table.align['End'] = 'c'
self.table.align['Status'] = 'c'
self.table.align['Summary'] = 'l'
def fetchData(self, serverId: str, startTimestamp: float, endTimestamp: float):
"""Retrieve a list of all alerts of a specified server in the specified time period"""
# if data is already downloaded, use cached data
if self.notifications != None:
return True
params = self.config.params()
params['start'] = int(startTimestamp)
params['end'] = int(endTimestamp)
response_json = apiGet('server/' + serverId + '/notifications', 200, self.config, params)
if response_json:
if 'data' in response_json:
self.notifications = response_json['data']
return True
else:
printWarn('No notifications found for server', serverId)
self.notifications = None
return False
else:
self.notifications = None
return False
def list(self, serverId: str, startTimestamp: float, endTimestamp: float, sort: str = '', reverse: bool = False, limit: int = 0):
"""Iterate through list of server notifications and print details"""
if self.fetchData(serverId, startTimestamp, endTimestamp):
# if JSON was requested and no filters, then just print it without iterating through
if self.format == 'json':
print(json.dumps(self.notifications, indent=4))
return
# Iterate through list of servers and print data, etc.
for notification in self.notifications:
self.print(notification)
self.printFooter(sort=sort, reverse=reverse, limit=limit)
def printFooter(self, sort: str = '', reverse: bool = False, limit: int = 0):
"""Print table if table format requested"""
if (self.format == 'table'):
# if self.config.hide_ids:
# self.table.del_column('ID')
if sort:
# if sort contains the column index instead of the column name, get the column name instead
if sort.isdecimal():
sort = self.table.get_csv_string().split(',')[int(sort) - 1]
else:
sort = None
if limit > 0:
print(self.table.get_string(sortby=sort, reversesort=reverse, start=0, end=limit))
else:
print(self.table.get_string(sortby=sort, reversesort=reverse))
elif (self.format == 'csv'):
print(self.table.get_csv_string(delimiter=self.config.delimiter))
def print(self, notification):
"""Print the data of the specified contact"""
if (self.format == 'json'):
print(json.dumps(notification, indent=4))
return
startTimestamp = datetime.fromtimestamp(float(notification['start']))
endTimestamp = datetime.fromtimestamp(float(notification['end']))
status = notification['status']
summary = notification['summary']
self.table.add_row([startTimestamp.strftime('%Y-%m-%d %H:%M:%S'), endTimestamp.strftime('%Y-%m-%d %H:%M:%S'), status, summary]) | PypiClean |
/Dooders-0.0.3.tar.gz/Dooders-0.0.3/dooders/sdk/models/arena.py | from typing import TYPE_CHECKING, Generator
import networkx as nx
from pydantic import BaseModel
from sklearn.decomposition import PCA
from dooders.sdk.models import Dooder
if TYPE_CHECKING:
from dooders.sdk.base.reality import BaseSimulation
gene_embedding = PCA(n_components=3)
class Attributes(BaseModel):
dooders_created: int = 0
dooders_died: int = 0
class Arena:
"""
Class manages Dooder objects in the simulation.
The class also keeps track of the total number of Dooders created and
terminated for each cycle. (The Information class will have historical
data for the above stats. The counts are reset after each cycle.)
Parameters
----------
simulation : Simulation object
The simulation object that contains the environment, agents,
and other models.
Attributes
----------
dooders_created : int
The total number of Dooders created (for the current cycle).
    dooders_died : int
The total number of Dooders terminated (for the current cycle).
graph : networkx.Graph
The graph object that contains the Dooder objects and relationships.
active_dooders : dict
Current active Dooders indexed by their unique id.
graveyard : list
Terminated Dooders IDs
simulation: see ``Parameters`` section.
seed : function
The function that generates the seed population to start
the simulation.
Methods
-------
_setup() -> None
Setup the Arena. This will reset attributes.
step() -> None
Step the Arena forward. Currently, this will only reset attributes.
reset() -> None
Reset main attributes after each cycle.
generate_seed_population() -> None
Generate seed population based on the selected strategy.
    generate_dooder(position: tuple) -> None
Generate a new dooder and place it in the environment
place_dooder(dooder: Dooder, position: tuple) -> None
Place a dooder in the environment
_generate_dooder(position: tuple, tag: str = 'Seed') -> Dooder
Generate a new dooder with a provided position
terminate_dooder(dooder: Dooder) -> None
Terminate a dooder
get_dooder(dooder_id: str) -> Dooder
Get a dooder by its unique id
dooders() -> Generator[Dooder, None, None]
Get all dooders in the environment
    collect() -> dict
        Collect statistics from all dooders
Properties
----------
active_dooder_count : int
The number of active Dooders.
state : dict
The state of the Arena.
weights : list
The weights of all active Dooders.
"""
total_counter = 0
def __init__(self, simulation: 'BaseSimulation', settings) -> None:
self.graph = nx.Graph()
self.active_dooders = {}
self.graveyard = {}
self.simulation = simulation
self.settings = settings
def _setup(self) -> None:
self.reset() # set attributes
def step(self) -> None:
"""
Step the Arena forward. Currently, this will only reset attributes.
"""
self.reset()
def reset(self) -> None:
"""
Reset main attributes after each cycle.
"""
for attribute in Attributes():
setattr(self, attribute[0], attribute[1])
def generate_seed_population(self) -> None:
"""
Generate seed population based on the selected strategy.
"""
self.initial_dooder_count = self.settings.get('SeedCount')
for position in self.SeedPlacement(self.initial_dooder_count):
self.generate_dooder(position)
def _generate_dooder(self, position: tuple, tag: str = 'Seed') -> 'Dooder':
"""
Generate a new dooder with a provided position
Parameters
----------
position : tuple
position to place dooder, (x, y)
Returns
-------
Dooder: dooder object
Newly generated Dooder object
"""
dooder = Dooder(self.simulation.generate_id(),
position, self.simulation)
dooder.tag = tag
dooder.gene_embedding = gene_embedding
return dooder
def generate_dooder(self, position: tuple) -> None:
"""
Generate a new dooder and place it in the environment
Parameters
----------
position : tuple
position to place dooder, (x, y)
"""
dooder = self._generate_dooder(position)
self.place_dooder(dooder, position)
dooder.log(granularity=1,
message=f"Created {dooder.id}", scope='Dooder')
def place_dooder(self, dooder: 'Dooder', position: tuple) -> None:
"""
Place dooder in environment
The method will also add the dooder to the active_dooders dictionary
and add the dooder to the graph for relationship tracking.
Parameters
----------
dooder : Dooder object
position : tuple
position to place dooder, (x, y)
"""
self.simulation.environment.place_object(dooder, position)
self.simulation.time.add(dooder)
self.active_dooders[dooder.id] = dooder
#! TODO: Add more attributes to graph node
self.graph.add_node(dooder.id)
self.dooders_created += 1
self.total_counter += 1
dooder.number = self.total_counter
def terminate_dooder(self, dooder: 'Dooder') -> None:
"""
Terminate dooder based on the unique id
Removes from active_dooders, environment, and time
Parameters
----------
dooder_id : str
dooder unique id, generated by the simulation
"""
self.simulation.time.remove(dooder)
self.simulation.environment.remove_object(dooder)
self.active_dooders.pop(dooder.id)
self.graveyard[dooder.id] = dooder.state
self.dooders_died += 1
del dooder
def get_dooder(self, dooder_id: str = None) -> 'Dooder':
"""
Get dooder based on the unique id, if no id is provided, a random dooder
will be selected from the active dooders. If no active dooders are
available, a random dooder will be selected from the graveyard.
Parameters:
----------
dooder_id : str
dooder unique id, generated by the simulation
Returns
-------
Dooder: dooder object
"""
if dooder_id is None:
if len(self.active_dooders) == 0:
return self.simulation.random.choice(list(self.graveyard.values()))
else:
return self.simulation.random.choice(list(self.active_dooders.values()))
else:
return self.active_dooders[dooder_id]
def dooders(self) -> Generator['Dooder', None, None]:
"""
Generator that yields all active dooders
Yields
------
Dooder: dooder object
"""
for dooder in self.active_dooders.values():
yield dooder
def collect(self) -> dict:
"""
Collects the attributes of dooders for simulation statistics.
Returns
-------
dict
A dictionary of the dooders' attributes.
"""
dooder_attributes = [
(dooder.age, dooder.hunger, dooder.energy_consumed) for dooder in self.dooders()]
dooder_count = len(dooder_attributes)
def median(data: list) -> float:
data = sorted(data)
n = len(data)
mid = n // 2
return (data[mid] if n % 2 else (data[mid - 1] + data[mid]) / 2)
if dooder_count > 0:
ages, hunger, energy_consumed = zip(*dooder_attributes)
else:
ages, hunger, energy_consumed = [], [], []
return {
'active_dooder_count': self.active_dooder_count,
'terminated_dooder_count': self.dooders_died,
'created_dooder_count': self.dooders_created,
'average_dooder_hunger': round(sum(hunger) / dooder_count, 3) if hunger else 0,
'median_dooder_age': median(ages) if ages else 0,
'average_dooder_age': round(sum(ages) / dooder_count, 3) if ages else 0,
'average_energy_consumed': round(sum(energy_consumed) / dooder_count, 3) if energy_consumed else 0
}
@property
def active_dooder_count(self) -> int:
"""
Returns the number of active dooders
"""
return len(self.active_dooders)
@property
def state(self) -> dict:
"""
Returns the state of the Arena of all active dooders
"""
return {**self.graveyard, **{k: v.state for k, v in self.active_dooders.items()}}
@property
def weights(self) -> dict:
"""
Returns the weights of the Arena for all active dooders
"""
return [v.weights['Consume'] for v in self.active_dooders.values()]
@property
def current_cycle(self) -> int:
"""
Returns the current cycle of the simulation
"""
return self.simulation.cycle_number | PypiClean |
/Freddie-0.9.8-py3-none-any.whl/freddie/db/fields.py | from typing import Any, Iterable, List, Type, Union
from peewee import (
Expression,
Field as DBField,
FieldAccessor,
ForeignKeyField,
MetaField,
Model,
Query,
)
class ManyToManyAccessor(FieldAccessor):
field: 'ManyToManyField'
def __get__(
self, instance: Model, instance_type: Type[Model] = None
) -> Union[list, 'ManyToManyField']:
if instance is not None:
return instance.__data__.get(self.name, []) # type: ignore
return self.field
class ManyToManyField(MetaField):
accessor_class = ManyToManyAccessor
model: 'ModelType'
rel_model: 'ModelType'
through_model_name: str
through_model: 'ModelType'
def __init__(self, rel_model: 'ModelType', through_model_name: str, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
self.rel_model = rel_model
self.through_model_name = through_model_name
def __call__(self, pk: Any) -> 'QueryBuilder':
return QueryBuilder(pk, self)
@property
def model_name(self) -> str:
return self.model.__name__.lower()
@property
def rel_model_name(self) -> str:
return self.rel_model.__name__.lower()
@property
def rel_model_keys(self) -> Iterable[str]:
return tuple(self.rel_model._meta.fields.keys())
@property
def rel_model_pk(self) -> DBField:
return self.rel_model._meta.primary_key
@property
def model_fk(self) -> ForeignKeyField:
return getattr(self.through_model, self.model_name)
@property
def rel_model_fk(self) -> ForeignKeyField:
return getattr(self.through_model, self.rel_model_name)
@property
def property_deps(self) -> List[DBField]:
'''ManyToManyField depends on field referenced by relation to join tables'''
return [self.model_fk.rel_field]
class QueryBuilder:
pk: Any
field: ManyToManyField
name: str
__slots__ = ('pk', 'field', 'name')
def __init__(self, pk: Any, field: ManyToManyField):
super().__init__()
self.pk = pk
self.field = field
def get(
self,
fields: Iterable[DBField] = None,
conditions: Iterable[Expression] = None,
) -> Query:
related_objects_pks = self.field.through_model.select(self.field.rel_model_fk).where(
self.field.model_fk == self.pk
)
rel_model_fields = fields if fields else (self.field.rel_model,)
query = self.field.rel_model.select(*rel_model_fields).where(
self.field.rel_model_pk << related_objects_pks, *(conditions or ())
)
return query
def add(self, *related_model_ids: Any) -> Query:
if not related_model_ids:
raise ValueError('No objects IDs passed for many-to-many relation')
data = [
{self.field.rel_model_name: related_id, self.field.model_name: self.pk}
for related_id in related_model_ids
]
return self.field.through_model.insert_many(data)
def clear(self) -> Query:
return self.field.through_model.delete().where(self.field.model_fk == self.pk)
ModelType = Type[Model] | PypiClean |
/Bgp-0.2.tar.gz/Bgp-0.2/bgp/bgplib.py |
# http://trac.secdev.org/scapy/ticket/162
# scapy.contrib.description = BGP
# scapy.contrib.status = loads
import pdb
from scapy.packet import *
from scapy.fields import *
from scapy.layers.inet import TCP
class BGPIPField(Field):
    """Represents how BGP encodes an IP prefix as (length, prefix)"""
def mask2iplen(self,mask):
"""turn the mask into the length in bytes of the ip field"""
return (mask + 7) // 8
def h2i(self, pkt, h):
"""human x.x.x.x/y to internal"""
ip,mask = re.split( '/', h)
return int(mask), ip
def i2h( self, pkt, i):
mask, ip = i
return ip + '/' + str( mask )
def i2repr( self, pkt, i):
"""make it look nice"""
return self.i2h(pkt,i)
def i2len(self, pkt, i):
"""rely on integer division"""
mask, ip = i
return self.mask2iplen(mask) + 1
def i2m(self, pkt, i):
"""internal (ip as bytes, mask as int) to machine"""
        mask, ip = i
ip = inet_aton( ip )
return struct.pack(">B",mask) + ip[:self.mask2iplen(mask)]
def addfield(self, pkt, s, val):
return s+self.i2m(pkt, val)
def getfield(self, pkt, s):
l = self.mask2iplen( struct.unpack(">B",s[0])[0] ) + 1
return s[l:], self.m2i(pkt,s[:l])
def m2i(self,pkt,m):
mask = struct.unpack(">B",m[0])[0]
ip = "".join( [ m[i + 1] if i < self.mask2iplen(mask) else '\x00' for i in range(4)] )
return (mask,inet_ntoa(ip))
class BGPHeader(Packet):
"""The first part of any BGP packet"""
name = "BGP header"
fields_desc = [
XBitField("marker",0xffffffffffffffffffffffffffffffff, 0x80 ),
ShortField("len", None),
ByteEnumField("type", 4, {0:"none", 1:"open",2:"update",3:"notification",4:"keep_alive"}),
]
def post_build(self, p, pay):
if self.len is None and pay:
l = len(p) + len(pay)
p = p[:16]+struct.pack("!H", l)+p[18:]
return p+pay
class BGPOptionalParameter(Packet):
"""Format of optional Parameter for BGP Open"""
name = "BGP Optional Parameters"
fields_desc = [
ByteField("type", 2),
ByteField("len", None),
StrLenField("value", "", length_from = lambda x: x.len),
]
def post_build(self,p,pay):
if self.len is None:
l = len(p) - 2 # 2 is length without value
p = p[:1]+struct.pack("!B", l)+p[2:]
return p+pay
def extract_padding(self, p):
"""any thing after this packet is extracted is padding"""
return "",p
class BGPOpen(Packet):
""" Opens a new BGP session"""
name = "BGP Open Header"
fields_desc = [
ByteField("version", 4),
ShortField("AS", 0),
ShortField("hold_time", 0),
IPField("bgp_id","0.0.0.0"),
ByteField("opt_parm_len", None),
PacketListField("opt_parm",[], BGPOptionalParameter, length_from=lambda p:p.opt_parm_len),
]
def post_build(self, p, pay):
if self.opt_parm_len is None:
l = len(p) - 10 # 10 is regular length with no additional options
p = p[:9] + struct.pack("!B",l) +p[10:]
return p+pay
class BGPAuthenticationData(Packet):
name = "BGP Authentication Data"
fields_desc = [
ByteField("AuthenticationCode", 0),
ByteField("FormMeaning", 0),
FieldLenField("Algorithm", 0),
]
class BGPPathAttribute(Packet):
"the attribute of total path"
name = "BGP Attribute fields"
fields_desc = [
        FlagsField("flags", 0x40, 8, ["NA0","NA1","NA2","NA3","Extended-Length","Partial","Transitive","Optional"]), #Extended-Length may not work
ByteEnumField("type", 1, {1:"ORIGIN", 2:"AS_PATH", 3:"NEXT_HOP", 4:"MULTI_EXIT_DISC", 5:"LOCAL_PREF", 6:"ATOMIC_AGGREGATE", 7:"AGGREGATOR"}),
ByteField("attr_len", None),
StrLenField("value", "", length_from = lambda p: p.attr_len),
]
def post_build(self, p, pay):
if self.attr_len is None:
l = len(p) - 3 # 3 is regular length with no additional options
p = p[:2] + struct.pack("!B",l) +p[3:]
return p+pay
def extract_padding(self, p):
"""any thing after this packet is extracted is padding"""
return "",p
class BGPUpdate(Packet):
"""Update the routes WithdrawnRoutes = UnfeasiableRoutes"""
name = "BGP Update fields"
fields_desc = [
ShortField("withdrawn_len", None),
FieldListField("withdrawn",[], BGPIPField("","0.0.0.0/0"), length_from=lambda p:p.withdrawn_len),
ShortField("tp_len", None),
PacketListField("total_path", [], BGPPathAttribute, length_from = lambda p: p.tp_len),
FieldListField("nlri",[], BGPIPField("","0.0.0.0/0"), length_from=lambda p:p.underlayer.len - 23 - p.tp_len - p.withdrawn_len), # len should be BGPHeader.len
]
def post_build(self,p,pay):
wl = self.withdrawn_len
subpacklen = lambda p: len ( str( p ))
subfieldlen = lambda p: BGPIPField("", "0.0.0.0/0").i2len(self, p )
if wl is None:
wl = sum ( map ( subfieldlen , self.withdrawn))
p = p[:0]+struct.pack("!H", wl)+p[2:]
if self.tp_len is None:
l = sum ( map ( subpacklen , self.total_path))
p = p[:2+wl]+struct.pack("!H", l)+p[4+wl:]
return p+pay
class BGPNotification(Packet):
name = "BGP Notification fields"
fields_desc = [
ByteEnumField("ErrorCode",0,{1:"Message Header Error",2:"OPEN Message Error",3:"UPDATE Messsage Error",4:"Hold Timer Expired",5:"Finite State Machine",6:"Cease"}),
ByteEnumField("ErrorSubCode",0,{1:"MessageHeader",2:"OPENMessage",3:"UPDATEMessage"}),
LongField("Data", 0),
]
class BGPErrorSubcodes(Packet):
name = "BGP Error Subcodes"
fields_desc = [
ByteEnumField("MessageHeader",0,{1:"Connection Not Synchronized",2:"Bad Message Length",3:"Bad Message Type"}),
ByteEnumField("OPENMessage",0,{1:"Unsupported Version Number",2:"Bad Peer AS",3:"Bad BGP Identifier",4:"Unsupported Optional Parameter",5:"Authentication Failure",6:"Unacceptable Hold Time"}),
ByteEnumField("UPDATEMessage",0,{1:"Malformed Attribute List",2:"Unrecognized Well-Known Attribute",3:"Missing Well-Known Attribute",4:"Attribute Flags Error",5:"Attribute Length Error",6:"Invalid ORIGIN Attribute",7:"AS Routing Loop",8:"Invalid NEXT_HOP Attribute",9:"Optional Attribute Error",10:"Invalid Network Field",11:"Malformed AS_PATH"}),
]
bind_layers( TCP, BGPHeader, dport=179)
bind_layers( TCP, BGPHeader, sport=179)
bind_layers( BGPHeader, BGPOpen, type=1)
bind_layers( BGPHeader, BGPUpdate, type=2)
bind_layers( BGPHeader, BGPHeader, type=4)
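# Illustrative sketch (not part of the original module): with these bindings loaded in scapy,
# a BGP OPEN message can be stacked roughly like
#   pkt = IP()/TCP(dport=179)/BGPHeader()/BGPOpen(AS=65000, hold_time=180, bgp_id="10.0.0.1")
# bind_layers() sets the header "type" field on stacking and post_build() fills in "len" when built.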
if __name__ == "__main__":
print "Bgp lib"
# interact(mydict=globals(), mybanner="BGP addon .05") | PypiClean |
/KqlmagicCustom-0.1.114.post13-py3-none-any.whl/Kqlmagic/kusto_client.py |
from typing import Dict
import re
import uuid
import json
from .my_aad_helper_msal import _MyAadHelper, ConnKeysKCSB
from .kql_response import KqlQueryResponse, KqlError
from .constants import Constants, ConnStrKeys, Cloud
from ._version import __version__
from .log import logger
from .exceptions import KqlEngineError
from .my_utils import json_dumps
from .kql_client import KqlClient
class KustoClient(KqlClient):
"""
Kusto client wrapper for Python."""
_ADX_CLIENT_BY_CLOUD = {
Cloud.PUBLIC: "db662dc1-0cfe-4e1c-a843-19a68e65be58",
Cloud.MOONCAKE: "db662dc1-0cfe-4e1c-a843-19a68e65be58",
Cloud.FAIRFAX: "730ea9e6-1e1d-480c-9df6-0bb9a90e1a0f",
Cloud.BLACKFOREST: "db662dc1-0cfe-4e1c-a843-19a68e65be58",
Cloud.PPE: "db662dc1-0cfe-4e1c-a843-19a68e65be58",
}
_ADX_CLIENT_BY_CLOUD[Cloud.CHINA] = _ADX_CLIENT_BY_CLOUD[Cloud.MOONCAKE]
_ADX_CLIENT_BY_CLOUD[Cloud.GOVERNMENT] = _ADX_CLIENT_BY_CLOUD[Cloud.FAIRFAX]
_ADX_CLIENT_BY_CLOUD[Cloud.GERMANY] = _ADX_CLIENT_BY_CLOUD[Cloud.BLACKFOREST]
_MGMT_ENDPOINT_VERSION = "v1"
_QUERY_ENDPOINT_VERSION = "v2"
_MGMT_ENDPOINT_TEMPLATE = "{0}/{1}/rest/mgmt"
_QUERY_ENDPOINT_TEMPLATE = "{0}/{1}/rest/query"
_ADX_PUBLIC_CLOUD_URL_SUFFIX = ".windows.net"
_ADX_MOONCAKE_CLOUD_URL_SUFFIX = ".chinacloudapi.cn"
_ADX_BLACKFOREST_CLOUD_URL_SUFFIX = ".cloudapi.de"
_ADX_FAIRFAX_CLOUD_URL_SUFFIX = ".usgovcloudapi.net"
_CLOUD_BY_ADX_HOST_SUFFIX = {
_ADX_PUBLIC_CLOUD_URL_SUFFIX: Cloud.PUBLIC,
_ADX_FAIRFAX_CLOUD_URL_SUFFIX: Cloud.FAIRFAX,
_ADX_MOONCAKE_CLOUD_URL_SUFFIX: Cloud.MOONCAKE,
_ADX_BLACKFOREST_CLOUD_URL_SUFFIX: Cloud.BLACKFOREST
}
_ADX_URL_SUFFIX_BY_CLOUD = {
Cloud.PUBLIC: _ADX_PUBLIC_CLOUD_URL_SUFFIX,
Cloud.MOONCAKE: _ADX_MOONCAKE_CLOUD_URL_SUFFIX,
Cloud.FAIRFAX: _ADX_FAIRFAX_CLOUD_URL_SUFFIX,
Cloud.BLACKFOREST: _ADX_BLACKFOREST_CLOUD_URL_SUFFIX
}
_ADX_URL_SUFFIX_BY_CLOUD[Cloud.CHINA] = _ADX_URL_SUFFIX_BY_CLOUD[Cloud.MOONCAKE]
_ADX_URL_SUFFIX_BY_CLOUD[Cloud.GOVERNMENT] = _ADX_URL_SUFFIX_BY_CLOUD[Cloud.FAIRFAX]
_ADX_URL_SUFFIX_BY_CLOUD[Cloud.GERMANY] = _ADX_URL_SUFFIX_BY_CLOUD[Cloud.BLACKFOREST]
_DATA_SOURCE_TEMPLATE = "https://{0}.kusto{1}"
_WEB_CLIENT_VERSION = __version__
_FQN_DRAFT_PROXY_CLUSTER_PATTERN = re.compile(r"http(s?)\:\/\/ade\.(int\.)?(applicationinsights|loganalytics)\.(?P<host_suffix>(io|cn|us|de)).*$")
_FQN_DRAFT_PROXY_CLUSTER_PATTERN2 = re.compile(r"http(s?)\:\/\/adx\.(int\.)?monitor\.azure\.(?P<host_suffix>(com|cn|us|de)).*$")
_CLOUD_BY_ADXPROXY_HOST_SUFFIX = {
"com": Cloud.PUBLIC,
"io": Cloud.PUBLIC,
"us": Cloud.FAIRFAX,
"cn": Cloud.MOONCAKE,
"de": Cloud.BLACKFOREST
}
def __init__(self, cluster_name:str, conn_kv:Dict[str,str], **options)->None:
"""
Kusto Client constructor.
Parameters
----------
kusto_cluster : str
Kusto cluster endpoint. Example: https://help.kusto.windows.net
client_id : str
The AAD application ID of the application making the request to Kusto
client_secret : str
The AAD application key of the application making the request to Kusto.
if this is given, then username/password should not be.
username : str
The username of the user making the request to Kusto.
if this is given, then password must follow and the client_secret should not be given.
password : str
The password matching the username of the user making the request to Kusto
authority : 'microsoft.com', optional
In case your tenant is not 'microsoft.com' please use this parameter.
"""
super(KustoClient, self).__init__()
self.default_cloud = options.get("cloud")
cluster_name = cluster_name or conn_kv[ConnStrKeys.CLUSTER]
if cluster_name.find("://") > 0:
data_source = cluster_name
elif cluster_name.find(".kusto.") > 0:
data_source = f"https://{cluster_name}"
elif cluster_name.find(".kusto(mfa).") > 0:
data_source = f"https://{cluster_name}"
elif cluster_name.find(".kustomfa.") > 0:
data_source = f"https://{cluster_name}"
else:
adx_url_suffix = self._ADX_URL_SUFFIX_BY_CLOUD.get(self.default_cloud)
if not adx_url_suffix:
raise KqlEngineError(f"adx not supported in cloud {self.default_cloud}")
if cluster_name.endswith(adx_url_suffix):
data_source = f"https://{cluster_name}"
else:
data_source = self._DATA_SOURCE_TEMPLATE.format(cluster_name, adx_url_suffix)
self._mgmt_endpoint = self._MGMT_ENDPOINT_TEMPLATE.format(data_source, self._MGMT_ENDPOINT_VERSION)
self._query_endpoint = self._QUERY_ENDPOINT_TEMPLATE.format(data_source, self._QUERY_ENDPOINT_VERSION)
match = self._FQN_DRAFT_PROXY_CLUSTER_PATTERN.match(data_source) or self._FQN_DRAFT_PROXY_CLUSTER_PATTERN2.match(data_source)
if match:
cloud = self._CLOUD_BY_ADXPROXY_HOST_SUFFIX.get(match.group("host_suffix")) or self.default_cloud
cloud_url_suffix = self._ADX_URL_SUFFIX_BY_CLOUD.get(cloud)
auth_resource = f"https://kusto.kusto{cloud_url_suffix}"
else:
auth_resource = data_source
cloud = self.getCloudFromHost(auth_resource)
client_id = self._ADX_CLIENT_BY_CLOUD[cloud]
http_client = self._http_client if options.get("auth_use_http_client") else None
self._aad_helper = _MyAadHelper(ConnKeysKCSB(conn_kv, auth_resource), client_id, http_client=http_client, **options) if conn_kv.get(ConnStrKeys.ANONYMOUS) is None else None
self._data_source = data_source
@property
def data_source(self)->str:
return self._data_source
@property
def deep_link_data_source(self)->str:
match = self._FQN_DRAFT_PROXY_CLUSTER_PATTERN.match(self.data_source) or self._FQN_DRAFT_PROXY_CLUSTER_PATTERN2.match(self.data_source)
if match:
cloud = self._CLOUD_BY_ADXPROXY_HOST_SUFFIX.get(match.group("host_suffix")) or self.default_cloud
cloud_url_suffix = self._ADX_URL_SUFFIX_BY_CLOUD.get(cloud)
return f"https://help.kusto{cloud_url_suffix}"
else:
return self._data_source
def getCloudFromHost(self, host:str)->str:
for adx_host_suffix in self._CLOUD_BY_ADX_HOST_SUFFIX:
if host.endswith(adx_host_suffix):
return self._CLOUD_BY_ADX_HOST_SUFFIX[adx_host_suffix]
return Cloud.PUBLIC
def execute(self, kusto_database:str, kusto_query:str, accept_partial_results:bool=False, **options)->KqlQueryResponse:
"""
Execute a simple query or management command
Parameters
----------
kusto_database : str
Database against query will be executed.
query : str
Query to be executed
accept_partial_results : bool
Optional parameter. If query fails, but we receive some results, we consider results as partial.
If this is True, results are returned to client, even if there are exceptions.
If this is False, exception is raised. Default is False.
options["timeout"] : float, optional
Optional parameter. Network timeout in seconds. Default is no timeout.
"""
if kusto_query.startswith("."):
endpoint_version = self._MGMT_ENDPOINT_VERSION
endpoint = self._mgmt_endpoint
else:
endpoint_version = self._QUERY_ENDPOINT_VERSION
endpoint = self._query_endpoint
# print("### db: ", kusto_database, " ###")
# print("### csl: ", kusto_query, " ###")
# kusto_database = kusto_database.replace(" ", "")
# print("### db: ", kusto_database, " ###")
request_payload = {
"db": kusto_database,
"csl": kusto_query,
}
client_version = f"{Constants.MAGIC_CLASS_NAME}.Python.Client:{self._WEB_CLIENT_VERSION}"
client_request_id = f"{Constants.MAGIC_CLASS_NAME}.execute"
client_request_id_tag = options.get("request_id_tag")
if client_request_id_tag is not None:
client_request_id = f"{client_request_id};{client_request_id_tag};{str(uuid.uuid4())}/{self._session_guid}/AzureDataExplorer"
else:
client_request_id = f"{client_request_id};{str(uuid.uuid4())}/{self._session_guid}/AzureDataExplorer"
app = f'{Constants.MAGIC_CLASS_NAME};{options.get("notebook_app")}'
app_tag = options.get("request_app_tag")
if app_tag is not None:
app = f"{app};{app_tag}"
query_properties:dict = options.get("query_properties") or {}
if type(kusto_query) == str:
first_word = kusto_query.split(maxsplit=1)[0].upper()
# ADX SQL mode
if first_word in ["SELECT", "UPDATE", "CREATE", "DELETE", "EXPLAIN"]:
# SQL to Kusto cheat sheet: https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/sqlcheatsheet
# MS-TDS/T-SQL Differences between Kusto Microsoft SQL Server: https://docs.microsoft.com/en-us/azure/data-explorer/kusto/api/tds/sqlknownissues
query_properties["query_language"] = "sql"
cache_max_age = options.get("request_cache_max_age")
if cache_max_age is not None and cache_max_age > 0:
query_properties["query_results_cache_max_age"] = query_properties.get("query_results_cache_max_age")\
or f"{cache_max_age}s"
if len(query_properties) > 0:
properties = {
"Options": query_properties,
"Parameters": {},
"ClientRequestId": client_request_id
}
request_payload["properties"] = json_dumps(properties)
request_headers = {
"Accept": "application/json",
"Accept-Encoding": "gzip,deflate",
"Content-Type": "application/json; charset=utf-8",
"x-ms-client-version": client_version,
"x-ms-client-request-id": client_request_id,
"x-ms-app": app
}
user_tag = options.get("request_user_tag")
if user_tag is not None:
request_headers["x-ms-user"] = user_tag
if self._aad_helper is not None:
request_headers["Authorization"] = self._aad_helper.acquire_token()
request_headers["Fed"] = "True"
cache_max_age = options.get("request_cache_max_age")
if cache_max_age is not None:
if cache_max_age > 0:
request_headers["Cache-Control"] = f"max-age={cache_max_age}"
else:
request_headers["Cache-Control"] = "no-cache"
# print("endpoint: ", endpoint)
# print("headers: ", request_headers)
# print("payload: ", request_payload)
# print("timeout: ", options.get("timeout"))
log_request_headers = request_headers
if request_headers.get("Authorization"):
log_request_headers = request_headers.copy()
log_request_headers["Authorization"] = "..."
logger().debug(f"KustoClient::execute - POST request - url: {endpoint}, headers: {log_request_headers}, payload: {request_payload}, timeout: {options.get('timeout')}")
# collect this information, in case bug report will be generated
KqlClient.last_query_info = {
"request": {
"endpoint": endpoint,
"headers": log_request_headers,
"payload": request_payload,
"timeout": options.get("timeout"),
}
}
response = self._http_client.post(endpoint, headers=request_headers, json=request_payload, timeout=options.get("timeout"))
logger().debug(f"KustoClient::execute - response - status: {response.status_code}, headers: {response.headers}, payload: {response.text}")
# print("response status code: ", response.status_code)
# print("response", response)
# print("response text", response.text)
# collect this information, in case bug report will be generated
self.last_query_info["response"] = { # pylint: disable=unsupported-assignment-operation
"status_code": response.status_code
}
if response.status_code < 200 or response.status_code >= 300: # pylint: disable=E1101
try:
parsed_error = json.loads(response.text)
except:
parsed_error = response.text
# collect this information, in case bug report will be generated
self.last_query_info["response"]["error"] = parsed_error # pylint: disable=unsupported-assignment-operation, unsubscriptable-object
raise KqlError(response.text, response)
kql_response = KqlQueryResponse(response.json(), endpoint_version)
if kql_response.has_exceptions() and not accept_partial_results:
try:
error_message = json_dumps(kql_response.get_exceptions())
except:
error_message = str(kql_response.get_exceptions())
raise KqlError(error_message, response, kql_response)
return kql_response | PypiClean |
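# Illustrative sketch (connection values assumed, not taken from this file): an anonymous
# client; with ConnStrKeys.ANONYMOUS set in conn_kv, __init__ above skips the AAD helper.
#   conn_kv = {ConnStrKeys.CLUSTER: "help", ConnStrKeys.ANONYMOUS: True}
#   client = KustoClient("help", conn_kv, cloud=Cloud.PUBLIC)
#   response = client.execute("Samples", "StormEvents | take 10", timeout=30)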
/KiMoPack_noqt-6.13.7-py3-none-any.whl/KiMoPack_noqt/plot_func.py | version = "6.13.7"
Copyright = '@Jens Uhlig'
if 1: #Hide imports
import os
from os import walk
import sys
import pandas
import numpy as np
from numpy import power, log10, shape
import numbers
import matplotlib
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.image as mpimg
from matplotlib.gridspec import GridSpec
from matplotlib.ticker import FuncFormatter
from matplotlib.colors import BoundaryNorm
from matplotlib.patches import Rectangle
from matplotlib.ticker import MaxNLocator
from matplotlib.offsetbox import AnchoredText
from matplotlib.ticker import AutoMinorLocator
from matplotlib.patches import Rectangle
from matplotlib import transforms
import re
import scipy
import scipy.constants
import scipy.interpolate as inp
from scipy.signal import savgol_filter
from scipy.signal import decimate
from scipy.special import erf
from scipy.optimize import minimize
from scipy.stats import binned_statistic
import scipy.stats
import pathlib
from pathlib import Path
from tkinter import filedialog
import tkinter
import time as tm #sorry i use time in my code
import lmfit
import h5py
try:
import PyQt5
except:
try:
import PyQt4
except:
try:
import qt
except:
print('Qt was not found')
try:
from pptx import Presentation
from pptx.util import Inches
except:
print('We need python-pptx to create a powerpoint file. Not essential. Either use pip or for anaconda: conda install -c conda-forge python-pptx')
try:
import urllib3
import shutil
except:
print('We need the packages urllib3 and shutil to download files from the web')
plt.ion()
pandas.options.mode.chained_assignment = None # I use this a lot and think I can ignore it
FWHM = 2.35482
shading = 'auto' # gouraud
standard_map = cm.jet
print('Plot_func version %s\nwas imported from path:\n %s' % (version, os.path.dirname(os.path.realpath(__file__))))
print('The current working folder is:\n %s' % os.getcwd())
#use this to trigger a real error for DeprecationWarnings
#np.warnings.filterwarnings('error', category=np.VisibleDeprecationWarning)
def download_notebooks():
'''function loads the workflow notebooks into the active folder'''
http = urllib3.PoolManager()
list_of_tools=['TA_Advanced_Fit.ipynb',
'TA_comparative_plotting_and_data_extraction.ipynb',
'TA_Raw_plotting.ipynb',
'TA_Raw_plotting_and_Simple_Fit.ipynb',
'TA_single_scan_handling.ipynb',
'Function_library_overview.pdf',
'function_library.py',
'import_library.py']
print('Now downloading the workflow tools')
for f in list_of_tools:
url = "https://raw.githubusercontent.com/erdzeichen/KiMoPack/main/Workflow_tools/%s"%f
print('Downloading Workflow Tools/%s'%f)
with open(check_folder(path = 'Workflow_tools', current_path = os.getcwd(), filename = f), 'wb') as out:
r = http.request('GET', url, preload_content=False)
shutil.copyfileobj(r, out)
def download_all():
''' function loads workflow notebooks and example files and tutorials'''
http = urllib3.PoolManager()
list_of_tools=['TA_Advanced_Fit.ipynb',
'TA_comparative_plotting_and_data_extraction.ipynb',
'TA_Raw_plotting.ipynb',
'TA_Raw_plotting_and_Simple_Fit.ipynb',
'TA_single_scan_handling.ipynb',
'Function_library_overview.pdf',
'function_library.py',
'import_library.py',
'Tutorial_Notebooks_for_local_use.zip']
print('Now downloading the workflow tools and tutorials')
for f in list_of_tools:
url = "https://raw.githubusercontent.com/erdzeichen/KiMoPack/main/Workflow_tools/%s"%f
print('Downloading Workflow Tools/%s'%f)
with open(check_folder(path = 'Workflow_tools', current_path = os.getcwd(), filename = f), 'wb') as out:
r = http.request('GET', url, preload_content=False)
shutil.copyfileobj(r, out)
list_of_example_data=['sample_1_chirp.dat',
'Sample_2_chirp.dat',
'sample_1.hdf5',
'sample_2.hdf5',
'Sample_1.SIA',
'Sample_2.SIA']
print('Now downloading the example files')
for f in list_of_example_data:
url = "https://raw.githubusercontent.com/erdzeichen/KiMoPack/main/Workflow_tools/Data/%s"%f
print('Downloading Workflow Tools/Data/%s'%f)
with open(check_folder(path = 'Workflow_tools'+os.sep+'Data', current_path = os.getcwd(), filename = f), 'wb') as out:
r = http.request('GET', url, preload_content=False)
shutil.copyfileobj(r, out)
def changefonts(weight='bold', font='standard', SMALL_SIZE=11, MEDIUM_SIZE=13, LARGE_SIZE=18):
'''
Small function that sets the matplotlib font sizes and fonts, written as a convenience so one does not need to remember all the
codes and which name does what. Calling the function will change the matplotlib *rc* settings
Parameters
------------
weight : str, optional
'bold' or 'normal'
font : str, optional
this is a meta switch that changes the family. known are:
'standard'='DejaVu Sans'\n
'arial'='Arial'\n
'helvetica'= 'Helvetica'\n
'garamond'='Garamond'\n
'verdana'='Verdana'\n
'bookman'='Bookman'\n
'times'='Times New Roman'
SMALL_SIZE : int, optional
(DEFAULT = 11)\n
all written text, legend title and face size
MEDIUM_SIZE : int, optional
(DEFAULT = 13)\n
tick size and tick numbers
LARGE_SIZE : int, optional
(DEFAULT = 18)\n
axis titles, figure titles, axis labels
'''
font_dict = {
'standard': {'weight': weight, 'size': SMALL_SIZE, 'family': 'DejaVu Sans'},
'arial': {'weight': weight, 'size': SMALL_SIZE, 'family': 'Arial'},
'helvetica': {'weight': weight, 'size': SMALL_SIZE, 'family': 'Helvetica'},
'garamond': {'weight': weight, 'size': SMALL_SIZE, 'family': 'Garamond'},
'verdana': {'weight': weight, 'size': SMALL_SIZE, 'family': 'Verdana'},
'bookman': {'weight': weight, 'size': SMALL_SIZE, 'family': 'Bookman'},
'times': {'weight': weight, 'size': SMALL_SIZE, 'family': 'Times New Roman'},
}
plt.rc('font', **font_dict[font])
plt.rc('axes', titlesize=LARGE_SIZE, labelweight=weight) # fontsize of the axes title
plt.rc('axes', labelsize=LARGE_SIZE, labelweight=weight) # fontsize of the x and y labels
plt.rc('axes', linewidth=1) # linewidth of all axes
plt.rc('axes', facecolor=(1, 1, 1, 0))
plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('legend', title_fontsize=SMALL_SIZE)
plt.rc('legend', facecolor=(1, 1, 1, 0))
plt.rc('legend', edgecolor=(1, 1, 1, 0))
plt.rc('legend', framealpha=0)
plt.rc('figure', titlesize=LARGE_SIZE) # fontsize of the figure title
plt.rc('figure', facecolor=(1, 1, 1, 0)) # fontsize of the figure title
changefonts() #we need to apply the font settings
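# Example (illustrative): switch all subsequent figures to Arial with normal weight and
# slightly larger tick labels; the call only updates the matplotlib rc settings.
# changefonts(weight='normal', font='arial', SMALL_SIZE=12, MEDIUM_SIZE=14, LARGE_SIZE=20)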
def clean_double_string(filename, path=None):
'''Attention: this function reads and rewrites!!! the file in place. It searches for double dashes and repeated dots and replaces them with single ones'''
import re
if path is None: path = os.path.dirname(os.path.realpath(__file__))
with open(Path(os.sep.join([path, filename])), 'r+') as f:
text = f.read()
text = re.sub('--', '-', text)
text = re.sub(r'\.+', '.', text)
f.seek(0)
f.write(text)
f.truncate()
def mouse_move(event):
x, y = event.xdata, event.ydata
print(x, y)
def flatten(mainlist):
return [entry for sublist in mainlist for entry in sublist]
def nearest_neighbor_method3(X, q):
'''returns the index of the column in X nearest to q'''
X = X.T
return np.argmin(np.sum((X - q) ** 2, axis=1))
def log_and(x, y, *args):
"""Returns the logical and of all 2+ arguments."""
result = np.logical_and(x, y)
for a in args:
result = np.logical_and(result, a)
return result
def s2_vs_smin2(Spectral_points = 512, Time_points = 130, number_of_species = 3, fitted_kinetic_pars = 7, target_quality = 0.95):
'''dfn is the numerator (number of fitted parameters), dfd is the denominator (number of degrees of freedom).
The F-test decides if a set of parameters gives a statistically significant difference; the t-test does the same for a single parameter.
Null hypothesis: all parameters are zero; if the test is significant, the coefficients improve the fit.
The f-statistics compares
"fitted parameters" = number of species * number of spectral points + number of kinetic parameters
"free points" = number of spectral points * number of time points - fitted parameters
within the target quality, i.e. by what factor the variances need to differ so that one is 100% * target_quality sure that they are different from zero'''
data_points = Spectral_points*Time_points
fitted_parameter = Spectral_points*number_of_species+fitted_kinetic_pars
Free_points = data_points-fitted_parameter
f_stat = scipy.stats.f.ppf(q = target_quality, dfn = fitted_parameter, dfd = Free_points)
#print('fitted points:%g\n Free points:%g\n f-stats: %g'%(fitted_parameter,Free_points,f_stat))
return 1+(fitted_parameter*f_stat/Free_points)
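# Example (illustrative): the returned value is the factor by which a competing fit's sum of
# squared residuals may exceed that of the best fit before the two fits are considered
# different at the chosen confidence level.
# limit = s2_vs_smin2(Spectral_points=512, Time_points=130, number_of_species=3,
#                     fitted_kinetic_pars=7, target_quality=0.95)
# a fit with s2/s2_min below "limit" is statistically indistinguishable from the best fit.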
def GUI_open(project_list = None, path = None, filename_part = None, fileending = 'hdf5', sep = "\t", decimal = '.',
index_is_energy = False, transpose = False, sort_indexes = False, divide_times_by = None,
shift_times_by = None, external_time = None, external_wave = None, use_same_name = True, data_type = None,
units = None, baseunit = None, conversion_function = None):
''' This Function
1. opens a gui and allows the selection of multiple saved projects, which are returned as a list
2. if given a list of project names opens them
3. if given the word 'all', opens all files in a given folder
The general behavior is selected by the first parameter (project_list)
it is designed to open combined files that contain both the wavelength and the time. (e.g. SIA files as recorded by Pascher instruments software) or hdf5 projects saved by this software
There are however a lot of additional options to open other ascii type files and adapt their format internally
Important, as default the parameter "fileending" selects hdf5 files only, which are used as project files (see :meth:`plot_func.TA.Save_project`)
for opening of other files the fileending parameter needs to be changed.
Parameters
----------
project_list : list (of str) or 'all', optional
Give a list of filenames that will be opened and returned as a list of objects
if the project list is 'all' then all files in the folder specified in path. The parameter "filename_part"
and "fileending" can be used to specify this selection
path : str or path object (optional)
if path is a string without the operating system dependent separator, it is treated as a relative path,
e.g. 'data' will look from the working directory in the sub directory 'data'. Otherwise this has to be a
full path in either string or path object form.
filename_part : str, optional
This parameter is only used for the option 'all', the (Default) None means do nothing. If a string is given then only
files that contain this string in their name will be read.
fileending : str, optional
this string is used to select the filetype that is suppose to open. For the GUI, only these files will be shown,
with the option 'all' this selects the files that will be read in the folder, 'hdf5' (Default)
sep : str (optional)
is the separator between different numbers, typical are tab '\t' (Default), one or
multiple white spaces '\s+' or comma ','.
decimal : str (optional)
sets the ascii symbol that is used for the decimal sign. In most countries this is '.'(Default)
but it can be ',' in countries like Sweden or Germany
index_is_energy : bool (optional)
switches if the wavelength is given in nm (Default) or in eV (if True), currently everything
is handled as wavelength in nm internally
transpose : bool (optional)
if this switch is False (Default) the wavelength are the columns and the rows the times.
data_type: str (optional)
data_type is the string that represents the intensity measurements. Usually this contains if absolute
of differential data. This is used for the color intensity in the 2d plots and the y-axis for the 1d plots
units: str (optional)
this is used to identify the units on the energy axis and to label the slices, recognized is 'nm', 'eV' and 'keV'
but if another unit like 'cm^-1' is used it will state energy in 'cm^-1'. Please observe that if you use the index_is_energy
switch the program tries to convert this energy into wavelength.
baseunit: str (optional)
this is used to identify the units on the developing/time axis. This is name that is attached to the index of the dataframe.
setting this during import is equivalent to ta.baseunit
sort_indexes : bool (optional)
For False (Default) I assume that the times and energies are already in a rising order.
with this switch, both are sorted again.
divide_times_by : None or float (optional)
here a number can be given that scales the time by an arbitrary factor. This is actually dividing
the times by this value. Alternatively there is the variable self.baseunit. The latter only affects
what is written on the axis, while this value is actually used to scale the times. None (Default)
ignores this
shift_times_by : None, float (optional)
This a value by which the time axis is shifted during import. This is a useful option of e.g.
the recording software does not compensate for t0 and the data is always shifted.
None (Default) ignores this setting
external_time : None or str (optional)
Here a filename extension (string) can be given that contains the time vector.
The file is assumed to be at the same path as the data and to contain a single
type of separated data without header.
If use_same_name = True (default)
It assumes that this is the ending for the file. The filename itself is taken from the filename.
e.g. if samp1.txt is the filename and external_time='.tid' the program searches
samp1.tid for the times. The transpose setting is applied and sets where the times are
to be inserted (row or column indexes)
If use_same_name = False this should be the file containing the vector for the time (in the same format as the main file)
external_wave : None or str (optional)
Here a filename extension (string) can be given that contains the wavelength vector.
If use_same_name = True (default)
The file is assumed to be at the same path as the data and to contain a single type
of separated data without header. This is the ending for the file. The filename itself
is taken from the filename. e.g. if samp1.txt is the filename and external_wave='.wav'
then the program searches samp1.wav for the wavelength. The transpose setting is applied
and sets where the wavelength are to be inserted (columns or row indexes)
If use_same_name = False
this should be a full filename that contains the vector
use_same_name : bool, optional
this switches if the external filename included the loaded filename or is a separate file True(default)
conversion_function: function(optional)
function that receives should have the shape:
return pandas Dataframe with time/frames in rows and wavelength/energy in columns,
The function is tested to accept (in that order) a
my_function(filename, external_time,external_wave),
my_function(filename, external_time),
my_function(filename,external_wave),
my_function(filename) and
return: the dataframe ds with the time_axis as rows and spectral axis as columns
if ds.index.name is not empty the "time axis" is set to that name; the spectral axis is set to ds.columns.name
the return is investigated if it is one, two, or three things.
if two are returned then the second must be the name of what the intensity axis is. This value will then be set to data_type
if three are returned the third is the baseunit (for the time axis) this allows to use the automatic naming in ps or nanosecond
If the values units, data_type or baseunit are (manually) set in the import function the corresponding entries in
datafram will be overwritten
shift_times_by and divide_times_by will be applied if not None (useful to adjust for offset before chirp correction)
Returns
--------------
List of opened TA objects
Examples
--------------
>>> import plot_func as pf
>>> project_list=pf.GUI_open() #start the GUI to open project Files
>>> project_list=pf.GUI_open(fileending='SIA') #start the GUI to open SIA Files
Opening a list of files using the file names
>>> project_list=pf.GUI_open(project_list = ['file1.SIA', 'file2.SIA'])
Opening all files in the folder "all_data" (relative to where the notebook is with the ending "hdf5"
>>> project_list=pf.GUI_open('all',path="all_data")
Opening a list of files with external time vector (of the same name) so it looks for a data
file "file1.txt" and a file with the time information "file1.tid"
>>> project_list=pf.GUI_open(project_list = ['file1.txt', 'file2.txt'], external_time = 'tid')
'''
if project_list is None:
root_window = tkinter.Tk()
root_window.withdraw()
root_window.attributes('-topmost',True)
root_window.after(1000, lambda: root_window.focus_force())
path_list = filedialog.askopenfilename(initialdir=os.getcwd(),multiple=True,filetypes=[('TA project files','*.%s'%fileending)])
if project_list is None:
project_list=[]
elif project_list=='all':
scan_path=check_folder(path = path, current_path = os.getcwd())
if filename_part is not None:#we specified a specific name and want only the files with this name in it
path_list = sorted([os.path.join(scan_path, name) for name in os.listdir(scan_path) if
name.endswith(fileending) and filename_part in name])
else:#we have not specified a specific name and want all files in the folder
path_list = sorted([currentFile for currentFile in scan_path.glob("*.%s"%fileending)])
else:
if len(project_list)<1:
raise ValueError('The use_gui switch is meant to bypass the gui, but you still need at least some files as a list')
else:
if isinstance(project_list, str):project_list=[project_list]
if not hasattr(project_list, '__iter__'):project_list=[project_list]
path_list = []
for filename in project_list:
ta=check_folder(path=path, filename=filename, current_path=os.getcwd())
path_list.append(ta)
return_list = []
for entrance in path_list:
try:
listen=os.path.split(entrance)
path=os.path.normpath(listen[0])
filename=listen[1]
ta = TA(filename = filename, path = path, sep = sep, decimal = decimal,
index_is_energy = index_is_energy, transpose = transpose, sort_indexes = sort_indexes,
divide_times_by = divide_times_by, shift_times_by = shift_times_by, external_time = external_time,
external_wave = external_wave, use_same_name = use_same_name, data_type = data_type, units = units,
baseunit = baseunit, conversion_function = conversion_function)
return_list.append(ta)
except:
print('Problem with entrance:\n %s'%entrance)
return return_list
def check_folder(path = None, current_path = None, filename = None):
'''Helper function using robust path determination.\n
In any case if a valid file name is given it is attached to the total path\n
The path can be a string or a windows/linux path or a pure path or a byte type path.\n
paths that do not exist (including parents) are created\n
1. if path is given absolute, it is returned\n
2. if path is a string (relative) the current_path + path is returned.\n
3. if current_path is not absolute or None, the current working directory is assumed as path.\n
4. If all are None, the current working directory is returned
Parameters
-----------
path : str, purePath, absolute or relative, optional
the final part of the path used
current_path : None, str, purePath, absolute, optional
path that sits before the "path" variable, is filled with the current working directory if left None
filename: None, str, optional
attached after path and returned if not None
'''
if isinstance(path,bytes):
path = '%s'%path
if path is not None:
path = pathlib.Path(path)
if isinstance(current_path, bytes):
current_path = '%s'%current_path
if current_path is not None:
current_path=pathlib.Path(current_path)
if isinstance(filename, bytes):
filename='%s'%filename
if filename is not None:
filename = pathlib.Path(filename)
if path is None:
if current_path is None:
directory = Path.cwd()
elif current_path.is_absolute():
directory=current_path
else:
print('attention, current_path was given but not absolute, replaced by cwd')
directory = Path.cwd()
elif path.is_absolute():
directory = path
else:
if current_path is None:
directory = Path.cwd().joinpath(path)
elif current_path.is_absolute():
directory = current_path.joinpath(path)
else:
print('attention, current_path was given but not absolute, replaced by cwd')
directory = Path.cwd().joinpath(path)
directory.mkdir( parents=True, exist_ok=True)
if filename is None:
return directory
else:
return directory.joinpath(filename)
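# Example (illustrative): both calls resolve relative to the current working directory and
# create the folder (including parents) if it does not exist yet.
# result_dir = check_folder(path='Results')                    # <cwd>/Results
# out_file = check_folder(path='Results', filename='ta.hdf5')  # <cwd>/Results/ta.hdf5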
def rebin(ori_df,new_x):
'''interpolation of values to new index'''
if isinstance(ori_df,pandas.DataFrame):
dum={'dummy':new_x}
new_df=pandas.DataFrame(dum,index=new_x)
for col in ori_df.columns:
new_df[col]=np.interp(new_x,ori_df.index.values.astype('float'),ori_df[col].values)
new_df=new_df.drop(columns=['dummy'])
return new_df
elif isinstance(ori_df,pandas.Series):
new_df=np.interp(new_x,ori_df.index.values.astype('float'),ori_df.values)
return pandas.Series(new_df,index=new_x)
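# Example (illustrative, "spectral_df" is an assumed DataFrame indexed by wavelength):
# interpolate the data onto a coarser, evenly spaced axis.
# new_axis = np.arange(350, 700, 2.0)
# rebinned = rebin(spectral_df, new_axis)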
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
'''Ported from a previous function'''
return savgol_filter(x=y, window_length=window_size, polyorder=order, deriv=deriv, delta=rate)
def Frame_golay(df, window=5, order=2,transpose=False):
'''Convenience method that returns the Golay smoothed data for each column (DataFrame) or the series
Parameters
-----------
df : pandas.DataFrame,pandas.Series
the DataFrame that has to be interpolated
window : int, optional
5 (Default) an integer that indicates how many points are used in the smoothing window
order : int, optional
2 (Default) an integer that indicates what order polynomial is to be used to interpolate the points.
order=1 effectively turns this into a floating average
transpose : bool,optional
in which orientation the smoothing is to be done. Default is within the column (usually timepoints)
Returns
---------
pandas.DataFrame or pandas.Series
DataFrame or Series with the interpolation applied
'''
#df=df.fillna(0)
if transpose:
df=df.T
if isinstance(df,pandas.DataFrame):
for col in df.columns:
try:
df.loc[:,col]=savitzky_golay(df.loc[:,col].values, window, order)
except:
print(col + ' was not smoothed')
if transpose:
df=df.T
return df
elif isinstance(df,pandas.Series):
return pandas.Series(savitzky_golay(df.values, window, order),index=df.index)
else:
raise TypeError('must be series or DataFrame')
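# Example (illustrative, "ta.ds" stands for the data matrix of a TA object): smooth each
# kinetic trace (one column per wavelength) with a 7-point, 2nd order Savitzky-Golay filter;
# transpose=True would instead smooth along the spectra.
# smoothed = Frame_golay(ta.ds.copy(), window=7, order=2)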
def find_nearest(arr,value,con_str=False):
'''returns the value in the array closest to value'''
return arr[find_nearest_index(arr,value,con_str=con_str)]
def find_nearest_index(arr,value,con_str=False):
'''returns the index in the array closest to value (the first one)'''
if con_str:
temp_array=np.array(arr,dtype='float')
idx = (np.abs(temp_array-value)).argmin()
else:
idx = (np.abs(arr-value)).argmin()
return idx
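# Example (illustrative):
# wavelengths = np.array([400., 450., 500., 550.])
# find_nearest(wavelengths, 510)        # -> 500.0
# find_nearest_index(wavelengths, 510)  # -> 2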
def rise(x,sigma=0.1,begin=0):
''' my own implementation of the instrument response function.
Based upon an error function from 0 to 1.
Sigma is the width (after which it has 50%)
and begin is 10% of height'''
return (erf((x-sigma)*np.sqrt(2)/(sigma))+1)/2
def gauss(t,sigma=0.1,mu=0):
'''Gauss function'''
y=np.exp(-0.5*((t-mu)**2)/sigma**2)
y/=sigma*np.sqrt(2*np.pi)
return y
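# Example (illustrative): sample the instrument-response building blocks on a time axis.
# t = np.linspace(-1, 3, 400)
# irf = gauss(t, sigma=0.1, mu=0)  # area-normalized Gaussian
# step = rise(t, sigma=0.1)        # error-function rise from 0 to 1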
def norm(df):
'''Min max norming of a dataframe'''
return df.apply(lambda x: (x - np.min(x)) / (np.max(x) - np.min(x)))
def shift(df,name = None,shift = None):
'''Shifts the named columns of a dataframe along the index, interpolates and then resamples onto the original index'''
if name is None:name = df.columns
if isinstance(name,type('hello')):name = [name]
for nam in name:
ori_dat = df[nam].values
ori_en = np.array(df.index,dtype = 'float')
if ori_en[0]>ori_en[1]:#oh we have inverse order
dat = np.interp(ori_en[::-1],ori_en[::-1]+shift,ori_dat[::-1])
dat = dat[::-1]
else:
dat = np.interp(ori_en,ori_en+shift,ori_dat)
df[nam] = dat
return df
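# Example (illustrative, 'sample_A' is an assumed column name): shift that column of a
# spectral DataFrame by +2 units along its index and resample back onto the original axis.
# shifted_df = shift(df.copy(), name='sample_A', shift=2.0)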
def colm(k,cmap = standard_map):
'''Returns one colour (integer k) or a list of colours (iterable k). If a colour map is given, it is sampled; a DataFrame, array or list of explicit colours is indexed directly.'''
if isinstance(cmap,type(cm.jet)) or isinstance(cmap,type(cm.viridis)):
if hasattr(k,'__iter__'):
if min(k) >0:#we got a color offset
mini = min(k)/(min(k)+1)
else:
mini = 0
out = [ cmap(x) for x in np.linspace(mini, 1, len(k)+1) ]
out = out[:-1]
return out
else:# get me 10 colors
out = [cmap(x) for x in np.linspace(0, 1, 10)]
ret = out[k]
return ret
else: #we assume it is a iterable thingy
if not hasattr(k,'__iter__'):k = [k]
if isinstance(cmap,pandas.DataFrame):
out = [list(cmap.iloc[ent,:].values) for ent in k]
elif isinstance(cmap,np.ndarray):
out = [cmap[int(ent),:] for ent in k]
elif isinstance(cmap,list):
out = [cmap[int(ent)] for ent in k]
else:
print('didn\'t find the right colour format for cmap')
return out
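# Example (illustrative): five evenly spaced colours from the default map for a set of lines,
# or one colour picked by integer index from another map.
# line_colours = colm(range(5))             # list of five RGBA tuples from standard_map
# single_colour = colm(2, cmap=cm.viridis)  # the third of ten colours sampled from viridis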
def Summarize_scans(list_of_scans = None, path_to_scans = 'Scans', list_to_dump = 'range', window1 = None, window2 = None,
save_name = 'combined.SIA', fileending = 'SIA', filename_part = 'Scan', return_removed_list = False,
sep = "\t", decimal = '.', index_is_energy = False, transpose = False, sort_indexes = False,
divide_times_by = None, shift_times_by = None, external_time = None, external_wave = None, use_same_name = True,
return_ds_only=False, data_type = None, units = None, baseunit = None, conversion_function = None, fitcoeff = None,
base_TA_object = None, value_filter = None, zscore_filter_level = None, zscore_in_window = True,
dump_times = True, replace_values = None, drop_scans = False):
'''
Average single scans. Uses single scans of the data set and plots them as an average under different conditions. Usually one defines one or two windows in which the intensity is integrated. This integrated number is then displayed for each scan in the list. There are different tools to select certain scans that are excluded from the summary. These are defined in the list_to_dump. This can either be a list with the numbers, or a string with the word 'single' or 'range' (see below)
Parameters
-----------
list_of_scans : None, 'gui' or list
'gui' (choose scans via gui)\n
None (Default) load scan files from the specified folder (path_to_scans) with the specified file-ending
(fileending), if filename_part is a string then only files with this string in the name are taken\n
list of names (strings) loads this list of files
list of integers (that will be directly attached to the filename_part) to form the file name
path_to_scans : None, str or path object, optional
specify relative or absolute path to the scan-files (Default:'Scans')
fileending : str, optional
specify the file extension of the single scan files. The GUI will only show this fileending
(Default: '.SIA')
filename_part : str
specify a part of the string included in all scan-files (Default: 'Scan')
window1: None or list of 4 floats, optional
window in time and wavelength over which each scan is averaged.\n
window must have the shape [start time, end time, start wavelength, end wavelength]
(Default: None)
window2: list of 4 floats, optional
window in time and wavelength over which each scan is averaged.\n
window must have the shape [start time, end time, start wavelength, end wavelength]
(Default: None) IF not given then only one window will be displayed
list_to_dump : list, 'single' or 'range', optional
takes a list of scans to be excluded from the average, this list can be indexes (order)
in which the scans come, or a list of names. if this is given as a list the option "range"
is offered, which allows to add additional selection to the cut.\n
**'single'** allows you (in a GUI) to click on single points in plotted window1 or two that
is to be removed, useful for spike removal and makes only sense in conjunction with at least
a defined window1, if none is defined window1 = [0.5,10,300,1200] will be set automatically.
A right click removes the last selection a middle click applies it. An empty middle click
(without selecting anything) finishes the gui\n
**'range'** allows you (in a GUI) to click and define regions.\n
first left click is the left side of the window, second left click the right side of the window.
Third left click the left side of the second window,... A right click removes the last set point.
a middle click finishes and applies the selection\n
An **empty middle click** (without selecting anything) finishes the gui\n
useful for spike removal and definition of exclusion region (e.g. where the sample died)
This makes only sense in conjunction with at least a defined window1 ,
if none is defined window1 = [0.5,10,300,1200] will be set automatically
data_type: str (optional)
data_type is the string that represents the intensity measurements. Usually this contains if absolute
of differential data. This is used for the color intensity in the 2d plots and the y-axis for the 1d plots
units: str (optional)
this is used to identify the units on the energy axis and to label the slices, recognized is 'nm', 'eV' and 'keV'
but if another unit like 'cm^-1' is used it will state energy in 'cm^-1'. Please observe that if you use the index_is_energy
switch the program tries to convert this energy into wavelength.
baseunit: str (optional)
this is used to identify the units on the developing/time axis. This is name that is attached to the index of the dataframe.
setting this during import is equivalent to ta.baseunit
save_name : str, optional
specify name for saving the combined scans (Default) 'combined.SIA')
return_removed_list : bool, optional
(Default) False; if True, returns the list of removed scans instead of the averaged data set (this list could then be given as "list_to_dump" to also get the averaged data file). If a file name is given for the saved file (which is the default) then the file is saved anyway.
sep : str (optional)
is the separator between different numbers, typical are tab (Backslash t) (Default), one or
multiple white spaces 'backslash s+' or comma ','.
decimal : str (optional)
sets the ascii symbol that is used for the decimal sign. In most countries this is '.'(Default)
but it can be ',' in countries like Sweden or Germany
index_is_energy : bool (optional)
switches if the wavelength is given in nm (Default) or in eV (if True), currently everything
is handled as wavelength in nm internally
transpose : bool (optional)
if this switch is False (Default) the wavelength are the columns and the rows the times.
sort_indexes : bool (optional)
For False (Default) I assume that the times and energies are already in a rising order.
with this switch, both are sorted again.
divide_times_by : None or float (optional)
here a number can be given that scales the time by an arbitrary factor. This is actually dividing
the times by this value. Alternatively there is the variable self.baseunit. The latter only affects
what is written on the axis, while this value is actually used to scale the times. None (Default)
ignores this
shift_times_by : None, float (optional)
This a value by which the time axis is shifted during import. This is a useful option of e.g.
the recording software does not compensate for t0 and the data is always shifted.
None (Default) ignores this setting
external_time : None or str (optional)
Here a filename extension (string) can be given that contains the time vector.
The file is assumed to be at the same path as the data and to contain a single
type of separated data without header.
If use_same_name = True (default)
It assumes that this is the ending for the file. The filename itself is taken from the filename.
e.g. if samp1.txt is the filename and external_time='.tid' the program searches
samp1.tid for the times. The transpose setting is applied and sets where the times are
to be inserted (row or column indexes)
If use_same_name = False this should be the file containing the vector for the time (in the same format as the main file)
external_wave : None or str (optional)
Here a filename extension (string) can be given that contains the wavelength vector.
If use_same_name = True (default)
The file is assumed to be at the same path as the data and to contain a single type
of separated data without header. This is the ending for the file. The filename itself
is taken from the filename. e.g. if samp1.txt is the filename and external_wave='.wav'
then the program searches samp1.wav for the wavelength. The transpose setting is applied
and sets where the wavelength are to be inserted (columns or row indexes)
If use_same_name = False
this should be a full filename that contains the vector
use_same_name : bool, optional
this switches if the external filename included the loaded filename or is a separate file True(default)
conversion_function: function(optional)
function that receives should have the shape:
return pandas Dataframe with time/frames in rows and wavelength/energy in columns,
The function is tested to accept (in that order) a
my_function(filename, external_time,external_wave),
my_function(filename, external_time),
my_function(filename,external_wave),
my_function(filename) and
return: the dataframe ds with the time_axis as rows and spectral axis as columns
if ds.index.name is not empty the "time axis" is set to that name; the spectral axis is set to ds.columns.name
the return is investigated if it is one, two, or three things.
if two are returned then the second must be the name of what the intensity axis is. This value will then be set to data_type
if three are returned the third is the baseunit (for the time axis) this allows to use the automatic naming in ps or nanosecond
If the values units, data_type or baseunit are (manually) set in the import function the corresponding entries in
datafram will be overwritten
shift_times_by and divide_times_by will be applied if not None (useful to adjust for offset before chirp correction)
return_ds_only: boolean,(optional)
if False (Default) returns a TA object, otherwise just a DataFrame
fitcoeff: list, optional
these should be the chirp parameters that are to be applied to all sub scans in the list.
base_TA_object: TA object, optional
instead of the fitcoeff parameter a TA object can be provided that is then used as a template, meaning that the scattercuts and bordercuts will be applied before the filtering.
value_filter : None, float or iterable with two entries, optional
if float, everything above that value or below -abs(value_filter) will be filtered and replaced with replace_values
if iterable, then the first entry is the lower threshold, the second the upper threshold
zscore_filter_level : float, optional
if this value is set then the manual selection will be replaced with an automatic filter; the options dump_times,
replace_values and drop_scans decide what is done to the values that are filtered
typical value would be e.g. 3
zscore_in_window : bool,
decides if the filter is applied in the windows or over the whole matrix (using statistics on the values)
dump_times : bool,optional
Default True means that if the zscore filter flags a scan the bad time point is dropped from the average
replace_values : None, float, optional
if dump_times is False the filtered values will be replaced with this value.
drop_scans : bool,optional
Default: False. This is the harshest type of filter and means that the whole scan is dropped
Returns
---------
a TA object holding the averaged dataset of the selected scans if return_ds_only is False (Default), otherwise just the averaged dataset (ds), or
(if return_removed_list = True) the list of removed scans.
Examples
----------
Use a range to select the rejected scans, look at the scans by integrating the window 0.5 ps to 1 ps and 450 nm to 470 nm
>>> import plot_func as pf #import the module
>>> window1=[0.5,1,450,470] #define the window
>>> #use a 'GUI' to select the files
>>> pf.Summarize_scans(list_of_scans='gui',window1=window1)
>>> #use all scans in the subfolder scans that have the word 'Scan' in them and use the ending 'SIA'
>>> pf.Summarize_scans(path_to_scans = 'Scans', filename_part = 'Scan', window1=window1)
>>> #This does the same as these are standard
>>> pf.Summarize_scans(window1=window1)
'''
if (base_TA_object is not None) and (conversion_function is None):
if units is None:units=base_TA_object.ds.columns.name
if baseunit is None:baseunit=base_TA_object.ds.index.name
debug = True
if list_of_scans is None:
scan_path=check_folder(path = path_to_scans, current_path = os.getcwd())
if filename_part is not None:#we specified a specific name and want only the files with this name in it
list_of_scans = sorted([os.path.join(scan_path, name) for name in os.listdir(scan_path) if
name.endswith(fileending) and filename_part in name])
else:#we have not specified a specific name and want all files in the folder
list_of_scans = sorted([currentFile for currentFile in scan_path.glob("*.%s"%fileending)])
elif list_of_scans == 'gui':
root_window = tkinter.Tk()
root_window.withdraw()
root_window.attributes('-topmost',True)
root_window.after(1000, lambda: root_window.focus_force())
path_list = filedialog.askopenfilename(initialdir = os.getcwd(),multiple = True,filetypes = [('Raw scan files',"*.%s"%fileending)])
list_of_scans = path_list
elif not hasattr(list_of_scans,'__iter__'):
raise ValueError('We need something to iterate for the list')
if not isinstance(list_of_scans[0],TA):#we do not have opened file but most likely a list of names
try:
list_of_projects = []
for entrance in list_of_scans:
listen = os.path.split(entrance)
path = os.path.normpath(listen[0])
filename = listen[1]
new_ds=TA(filename = filename,path = path, sep = sep, decimal = decimal,
index_is_energy = index_is_energy, transpose = transpose,
sort_indexes = sort_indexes, divide_times_by = divide_times_by,
shift_times_by = shift_times_by, external_time = external_time,
external_wave = external_wave, use_same_name = use_same_name,
data_type = data_type, units = units, baseunit = baseunit,
conversion_function = conversion_function).ds
if base_TA_object is None:
if fitcoeff is not None:
new_ds=Fix_Chirp(ds=new_ds,fitcoeff=fitcoeff)
list_of_projects.append(new_ds.values)
else:
if fitcoeff is not None:
new_ds=Fix_Chirp(ds=new_ds,fitcoeff=fitcoeff)
try:
new_ds=sub_ds(new_ds, ignore_time_region = base_TA_object.ignore_time_region, wave_nm_bin = base_TA_object.wave_nm_bin, baseunit = base_TA_object.baseunit,
scattercut = base_TA_object.scattercut, bordercut = base_TA_object.bordercut, timelimits = base_TA_object.timelimits, time_bin = base_TA_object.time_bin,
equal_energy_bin = base_TA_object.equal_energy_bin)
if (base_TA_object.wave_nm_bin is not None) or (base_TA_object.equal_energy_bin is not None):
print('in the original TA object the data was rebinned, which is now also done for the single scans. To avoid that use "ta.wave_nm_bin = None" and / or "ta.equal_energy_bin = None" before handing it to base_TA_object')
except:
print('applying the base_TA_object slices failed')
list_of_projects.append(new_ds.values)
if base_TA_object is None:
ds = TA(filename = filename,path = path, sep = sep, decimal = decimal,
index_is_energy = index_is_energy, transpose = transpose, sort_indexes = sort_indexes,
divide_times_by = divide_times_by, shift_times_by = shift_times_by,
external_time = external_time, external_wave = external_wave,
use_same_name = use_same_name, data_type = data_type, units = units,
baseunit = baseunit, conversion_function = conversion_function).ds
else:
ds=base_TA_object.ds
ds = sub_ds(ds, ignore_time_region = base_TA_object.ignore_time_region, wave_nm_bin = base_TA_object.wave_nm_bin, baseunit = base_TA_object.baseunit,
scattercut = base_TA_object.scattercut, bordercut = base_TA_object.bordercut, timelimits = base_TA_object.timelimits, time_bin = base_TA_object.time_bin,
equal_energy_bin = base_TA_object.equal_energy_bin)
######################
try:
list_of_projects = np.transpose(np.array(list_of_projects),(1, 2, 0))
except:
print('the stacking of the scans failed, are you sure that all scans have the same shape')
#######################
except:
raise ValueError('Sorry did not understand the project_list entry, use GUI_open to create one')
else:
try:
list_of_projects = []
list_of_scans_names = []
for entrance in list_of_scans:
list_of_projects.append(entrance.ds.values)
list_of_scans_names.append(entrance.filename)
if base_TA_object is None:
ds = list_of_scans[0]
else:
ds=base_TA_object.ds
list_of_scans = list_of_scans_names
##########################
try:
list_of_projects = np.transpose(np.array(list_of_projects),(1, 2, 0))
except:
print('the stacking of the scans failed, are you sure that all scans have the same shape')
#########################
except:
raise ValueError('Sorry did not understand the project_list entry, use GUI_open to create one')
if window1 is None:
window1 = [ds.index.values.min(),ds.index.values.max(),ds.columns.values.min(),ds.columns.values.max()]
#### automatic filtering#####
if (zscore_filter_level is not None) or (value_filter is not None):
if replace_values is not None:
cut_bad_times=False
if replace_values is None:
replace_values = np.nan
dataset=list_of_projects
if value_filter is not None:
if hasattr(value_filter,'__iter__'):
lowervalue=value_filter[0]
uppervalue=value_filter[1]
else:
uppervalue = np.abs(value_filter)
lowervalue = -np.abs(value_filter)
outside_range=np.invert(log_and(dataset>lowervalue,dataset<uppervalue))
if dump_times:#this is default
outside_range=np.tile(outside_range.all(axis=1,keepdims=True),(1,dataset.shape[1],1))
elif drop_scans:
outside_range=np.tile(outside_range.any(axis=1,keepdims=True),(1,dataset.shape[1],1))
outside_range=np.tile(outside_range.any(axis=0,keepdims=True),(dataset.shape[0],1,1))
dataset[outside_range]=replace_values
if zscore_filter_level is not None:
if zscore_in_window:
window1_index = [find_nearest_index(ds.index.values,window1[0]),find_nearest_index(ds.index.values,window1[1]),find_nearest_index(ds.columns.values,window1[2]),find_nearest_index(ds.columns.values,window1[3])]
vector=np.nanmean(np.nanmean(dataset[window1_index[0]:window1_index[1],window1_index[2]:window1_index[3],:],axis=0),axis=1)
good=log_and(vector>(np.nanmean(vector) - zscore_filter_level*np.nanstd(vector)),vector<(np.nanmean(vector) + zscore_filter_level*np.nanstd(vector)))
if not window2 is None:
window2_index = [find_nearest_index(ds.index.values,window2[0]),find_nearest_index(ds.index.values,window2[1]),find_nearest_index(ds.columns.values,window2[2]),find_nearest_index(ds.columns.values,window2[3])]
vector=np.nanmean(np.nanmean(dataset[window2_index[0]:window2_index[1],window2_index[2]:window2_index[3],:],axis=0),axis=1)
good2=log_and(vector>(np.nanmean(vector) - zscore_filter_level*np.nanstd(vector)),vector<(np.nanmean(vector) + zscore_filter_level*np.nanstd(vector)))
good=log_and(good,good2)
dataset[:,:,np.invert(good)]=replace_values
else:
mean=np.nanmean(dataset,axis=2)
var=np.nanstd(dataset,axis=2)
lower=(mean - zscore_filter_level*var).T
upper=(mean + zscore_filter_level*var).T
lower=np.array([lower for i in range(dataset.shape[2])]).T
upper=np.array([upper for i in range(dataset.shape[2])]).T
outside_range=np.invert(log_and(dataset>lower,dataset<upper))
if drop_scans:
outside_range=np.tile(outside_range.any(axis=1,keepdims=True),(1,dataset.shape[1],1))
outside_range=np.tile(outside_range.any(axis=0,keepdims=True),(dataset.shape[0],1,1))
elif dump_times:
outside_range=np.tile(outside_range.any(axis=1,keepdims=True),(1,dataset.shape[1],1))
dataset[outside_range]=replace_values
list_of_projects=dataset
#############manual filtering################
else:
if baseunit is None:baseunit=ds.index.name
if units is None:units=ds.columns.name
if list_to_dump is not None:
if list_to_dump == 'single':
print('we will use a gui to select single scans to extract')
elif list_to_dump == 'range':
print('we will use a gui to select the first and last scan to remove')
else:
if not hasattr(list_to_dump,'__iter__'):#we have only a single number/name in there
list_to_dump = [list_to_dump]
filenames_to_dump = []
for entry in list_to_dump:
try:
filenames_to_dump.append(list_of_scans[entry].filename) #list_of_scans is a list of TA objects that have filename and if entry is an index of this list this goes well
except:
filenames_to_dump.append(entry)# we assume it is already a filename
list_to_dump = []
for filename in filenames_to_dump:
list_to_dump.append(list_of_scans.index(filename))
for i in range(30):#we make a maximum of 30 rounds
window1_index = [find_nearest_index(ds.index.values,window1[0]),find_nearest_index(ds.index.values,window1[1]),find_nearest_index(ds.columns.values,window1[2]),find_nearest_index(ds.columns.values,window1[3])]
series1 = pandas.Series(list_of_projects[window1_index[0]:window1_index[1],window1_index[2]:window1_index[3],:].mean(axis = (0,1)))
series1.name = '%.3g:%.3g %s at %.1f:%.1f %s'%(window1[0],window1[1],baseunit,window1[2],window1[3],units)
if not window2 is None:
window2_index = [find_nearest_index(ds.index.values,window2[0]),find_nearest_index(ds.index.values,window2[1]),find_nearest_index(ds.columns.values,window2[2]),find_nearest_index(ds.columns.values,window2[3])]
series2 = pandas.Series(list_of_projects[window2_index[0]:window2_index[1],window2_index[2]:window2_index[3],:].mean(axis = (0,1)))
series2.name = '%.3g:%.3g %s at %.1f:%.1f %s'%(window2[0],window2[1],baseunit,window2[2],window2[3],units)
fig,(ax,ax2) = plt.subplots(2,1,sharex = True,figsize = (16,12))
series1.plot(ax = ax,color = colm(1),use_index = False)
series2.plot(ax = ax2,color = colm(3),use_index = False)
if len(series1) >15:
gol=Frame_golay(series1,window=11,order=1)
gol.plot(ax=ax,use_index=False,color=colm(2))
ax.fill_between(x=range(len(series1)), y1=gol-series1.var(), y2=gol+series1.var(),color=colm(2),alpha=0.3)
gol=Frame_golay(series2,window=11,order=1)
gol.plot(ax=ax2,use_index=False,color=colm(4))
ax2.fill_between(x=range(len(series1)), y1=gol-2*series1.var(), y2=gol+2*series1.var(),color=colm(4),alpha=0.3)
else:
fig,ax=plt.subplots(1,1,sharex=True,figsize=(16,12))
series1.plot(ax=ax,color=colm(1),use_index=False)
if len(series1) >15:
gol=Frame_golay(series1,window=11,order=1)
gol.plot(ax=ax,use_index=False,color=colm(2))
ax.fill_between(x=range(len(series1)), y1=gol-2*series1.var(), y2=gol+2*series1.var(),color=colm(2),alpha=0.3)
if list_to_dump == 'single':
ax.set_title('click on the scans that should be dropped\n left click to choose, right click to delete last point, middle click finishes selection\n an empty middle click ends the process')
polypts=np.asarray(plt.ginput(n=int(len(series1)/2),timeout=300, show_clicks=True,mouse_add=1, mouse_pop=3, mouse_stop=2))
if len(polypts)<1:break
to_remove=[int(a) for a in np.array(polypts)[:,0]]
remove=pandas.Series(np.arange(len(series1)))+1
remove[to_remove]=0
to_remove=list(remove[remove<1].index.values)
to_keep=list(remove[remove>=1].index.values)
elif (list_to_dump == 'range') or (i>0):
ax.set_title('click on the first and last scan to be removed, repeat as long as necessary\n an empty middle click ends the process')
polypts=np.asarray(plt.ginput(n=int(len(series1)/2),timeout=300, show_clicks=True,mouse_add=1, mouse_pop=3, mouse_stop=2))
if len(polypts)<1:break
polypts=np.array(polypts)[:,0]
remove=pandas.Series(np.arange(len(series1)))+1
for i in range(int(len(polypts)/2)):
remove.loc[polypts[2*i]:polypts[2*i+1]]=0
to_remove=list(remove[remove<1].index.values)
to_keep=list(remove[remove>=1].index.values)
elif i == 0:
to_keep=list(range(len(series1)))
for entry in list_to_dump: to_keep.remove(entry)
else:
raise ValueError('Something is weird')
list_of_projects=list_of_projects[:,:,to_keep]
plt.close('all')
plt.close('all')
try:
df=pandas.DataFrame(np.any(np.isnan(dataset),axis=1),index=ds.index)
plot2d(df,levels = 2,use_colorbar = False,intensity_range=[0,1],title='rejected are red')
except:
print('plotting of filtered went wrong')
ds=pandas.DataFrame(np.nanmean(list_of_projects,axis=2),index=ds.index,columns=ds.columns)
if not save_name is None:
path = str(check_folder(path=path,filename=save_name))
ds.to_csv(path,sep='\t')
ta=TA(path)
else:
path = str(check_folder(path=path,filename='temp_combined.SIA'))
ds.to_csv(path,sep='\t')
ta=TA(path)
try:
os.remove(path)
except:
print('could not remove temp_combined.SIA')
if return_ds_only:
return ds
elif return_removed_list:
return filenames_to_dump
else:
if base_TA_object is not None:
ta=base_TA_object.Copy()
ta.ds_ori=ds
ta.ds=ds
return ta
def sub_ds(ds, times = None, time_width_percent = 0, ignore_time_region = None, drop_ignore=False, wave_nm_bin = None,
baseunit = None, scattercut = None, drop_scatter=False, bordercut = None, timelimits = None, wavelength_bin = None,
wavelength = None, time_bin = None, equal_energy_bin = None, from_fit = False):
'''This is the main function that creates all the slices of the data matrix
Parameters
---------------
ds : DataFrame
This dataframe contains the data to be plotted. It is copied and sliced into the
regions defined. The dataframe expects the time to be in Index and the wavelength/energy
to be in the columns. The spectra is plotted with a second (energy) axis
times : float or list/vector (of floats), optional
For each entry in rel_time a spectrum is plotted. If time_width_percent=0 (Default) the
nearest measured timepoint is chosen. For other values see 'time_width_percent'
time_width_percent : float
"rel_time" and "time_width_percent" work together for creating spectral plots at
specific timepoints. For each entry in rel_time a spectrum is plotted.
If however e.g. time_width_percent=10 the region between the timepoint closest
to the 1.1 x timepoint and 0.9 x timepoint is averaged and shown
(and the legend adjusted accordingly). This is particularly useful for the densely
sampled region close to t=0. Typically, for logarithmically recorded kinetics, the
timepoints at later times will be further apart than 10 percent of the value,
but this allows to elegantly combine values around time=0 for better statistics.
This averaging is only applied for the plotting function and not for the fits.
ignore_time_region : None or list (of two floats or of lists), optional
cuts a time range between a low and a high limit out of the fits. (Default) None: nothing happens
The region will be removed during the fitting process (and will be missing in the fit-result
plots)
Usage single region: [lower region limit,upper region limit],
use for multiple regions:[[lower limit 1,upper limit 1],[lower limit 2,upper limit 2],...]
drop_ignore : Bool, True or False, optional
If set to True the values in ignore_time_region are removed from the dataset instead of set to zero
wave_nm_bin : None or float, optional
rebins the original data into even intervals. If set to None the original data will be used.
If set to a width (e.g. 2nm), the wavelength axis will be divided into steps of this size
and the mean of all measurements in the interval is taken. The re-binning stops as soon as
the measured stepsize is wider than given here, then the original bins are used.
This function is particularly useful for spectrometer with non-linear dispersion,
like a prism in the infrared.
equal_energy_bin : None or float(optional)
if this is set the wave_nm_bin is ignored and the data is rebinned into equal energy bins.
baseunit : str
baseunit is a neat way to change the unit on the time axis of the plots. (Default) 'ps', but they
can be frames or something similar. This changes only the label of the axis.
During the import there is the option to divide the numbers by a factor.
I have also used frames or fs as units. Important is that all time units will be labeled with
this unit.
scattercut : None or iterable (of floats or other iterable, always pairs!), optional
intended to "cut" one or multiple scatter regions. (if (Default) None nothing
happens) If it is set the spectral region between the limits is set to zero.
Usage single region: [lower region limit,upper region limit],
use for multiple regions:[[lower limit 1,upper limit 1],[lower limit 2,upper limit 2],...]
drop_scatter : Bool, True or False, optional
If set to True the values in scattercut are removed from the dataset instead of set to zero
bordercut : None or iterable (with two floats), optional
cut spectra at the low and high wavelength limit. (Default) None
uses the limits of measurement
timelimits : None or list (of 2 floats), optional
cut times at the low and high time limit. (Default) None uses the limits of measurement
Important: If either the background or the chirp is to be fit this must include the
time before zero! Useful: It is useful to work on different regions, starting with
the longest (then use the ta.Backgound function prior to fit) and expand from there
wavelength : float or list (of floats), optional
'wavelength' and 'wavelength_bin' work together for the creation
of kinetic plots. When plotting kinetic spectra one line will be plotted for each entry
in the list/vector rel_wave. During object generation the vector np.arange(300,1000,100)
is set as standard. Another typical using style would be to define a list of interesting
wavelength at which a kinetic development is to be plotted. At each selected wavelength
the data between wavelength+ta.wavelength_bin and wavelength-ta.wavelength_bin is averaged
for each timepoint returned
wavelength_bin : float, optional
the width used in kinetics, see below (Default) None
time_bin : None or int, optional
is dividing the points on the time-axis in even bins and averages the found values in between.
This is a hard approach that also affects the fits. I recommend using this carefully,
it is most useful for modulated data. A better choice for transient absorption that only
affects the kinetics is 'time_width_percent'
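Examples
------------
A minimal usage sketch (assuming a DataFrame "ds" with the times as index and the wavelengths in nm as columns):
>>> kinetics = sub_ds(ds, wavelength = [400, 550], wavelength_bin = 10)
>>> spectra = sub_ds(ds, times = [1, 10], time_width_percent = 10)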
'''
time_label=ds.index.name
energy_label=ds.columns.name
if (wavelength is not None) and (times is not None):raise ValueError('can not get wavelength and times back')
if (bordercut is not None) and not from_fit:
ds.columns=ds.columns.astype('float')
ds=ds.loc[:,bordercut[0]:bordercut[1]]
if (equal_energy_bin is not None) and (wavelength is None):# we work with optical data but want to bin in equal energy
x=ds.columns.values.astype('float')
y=ds.index.values.astype('float')
energy_label='Energy in eV'
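# conversion wavelength (nm) -> photon energy (eV): E = h*c/(lambda*1e-9)/e, roughly 1239.8/lambda[nm]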
x=scipy.constants.h*scipy.constants.c/(x*1e-9*scipy.constants.electron_volt)
if from_fit:#they are already binned
ds.columns=x
ds=ds.sort_index(axis=1,ascending=False)
elif (x[1:]-x[:-1]>equal_energy_bin).all():
raise ValueError("equal_energy_bin bins are to small for the data")
else:
rebin_max=np.argmin((x[1:]-x[:-1])<equal_energy_bin)#find the position where the difference is larger than the wave_nm_bin
if rebin_max==0:rebin_max=len(x)# we get 0 when all the values are ok
if rebin_max<len(x):
if (x[1:]-x[:-1]>equal_energy_bin).all():raise ValueError("equal_energy_bin bins are too small for the data")
bins=np.arange(x.min(),x[rebin_max],equal_energy_bin)
bin_means,bin_edges = binned_statistic(x[:rebin_max], ds.values[:,:rebin_max], statistic='mean',bins=bins)[:2]
bins=(bin_edges[1:]+bin_edges[:-1])/2.
ds=pandas.concat((pandas.DataFrame(bin_means,index=y,columns=bins),ds.iloc[:,rebin_max:]), axis=1, join='outer')
else:
bins=np.arange(x.min(),x.max()+equal_energy_bin,equal_energy_bin)
bin_means,bins = binned_statistic(x, ds.values, statistic='mean',bins=bins)[:2]
bins=(bins[1:]+bins[:-1])/2.
ds=pandas.DataFrame(bin_means,index=y,columns=bins)
elif (wave_nm_bin is not None) and (wavelength is None):# bin in wavelength
x=ds.columns.values.astype('float')
y=ds.index.values.astype('float')
if (x[1:]-x[:-1]>wave_nm_bin).all():raise ValueError("wave_nm_bin bins are too small for the data")
rebin_max=np.argmin((x[1:]-x[:-1])<wave_nm_bin)#find the position where the difference is larger than the wave_nm_bin
if rebin_max==0:rebin_max=len(x)# we get 0 when all the values are ok
if rebin_max<len(x):
if (x[1:]-x[:-1]>wave_nm_bin).all():raise ValueError("wave_nm_bin bins are too small for the data")
bins=np.arange(x.min(),x[rebin_max],wave_nm_bin)
bin_means,bin_edges = binned_statistic(x[:rebin_max], ds.values[:,:rebin_max], statistic='mean',bins=bins)[:2]
bins=(bin_edges[1:]+bin_edges[:-1])/2.
ds=pandas.concat((pandas.DataFrame(bin_means,index=y,columns=bins),ds.iloc[:,rebin_max:]), axis=1, join='outer')
else:
bins=np.arange(x.min(),x.max()+wave_nm_bin,wave_nm_bin)
bin_means,bins = binned_statistic(x, ds.values, statistic='mean',bins=bins)[:2]
bins=(bins[1:]+bins[:-1])/2.
ds=pandas.DataFrame(bin_means,index=y,columns=bins)
if time_bin is not None:
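# hard re-binning of the time axis: every 'time_bin' consecutive time points are averaged into one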
time=ds.index.values.astype('float')
y=ds.columns.values.astype('float')
time_bin=int(time_bin)
time_bins=time[::time_bin]
bin_means,bins = binned_statistic(time, ds.values.T, statistic='mean',bins=time_bins)[:2]
bins=(bins[1:]+bins[:-1])/2.
ds=pandas.DataFrame(bin_means,index=y,columns=bins)
ds=ds.T
if timelimits is not None:
ds.index=ds.index.astype('float')
ds=ds.loc[timelimits[0]:timelimits[1],:]
if ignore_time_region is not None:
ds=ds.fillna(value=0)
ds.index=ds.index.astype('float')
if isinstance(ignore_time_region[0], numbers.Number):
if drop_ignore:
ds.loc[ignore_time_region[0]:ignore_time_region[1],:]=np.nan
else:
ds.loc[ignore_time_region[0]:ignore_time_region[1],:]=0
else:
try:
for entries in ignore_time_region:
if drop_ignore:
ds.loc[entries[0]:entries[1],:]=np.nan
else:
ds.loc[entries[0]:entries[1],:]=0
except:
pass
ds=ds.dropna(axis=0)
if scattercut is not None:
ds=ds.fillna(value=0)
x=ds.columns.values.astype('float')
if isinstance(scattercut[0], numbers.Number):
if (equal_energy_bin is not None):
scattercut=[scipy.constants.h*scipy.constants.c/(a*1e-9*scipy.constants.electron_volt) for a in scattercut]
scattercut=scattercut[::-1]
lower=find_nearest_index(x,scattercut[0])
upper=find_nearest_index(x,scattercut[1])
if drop_scatter:
ds.iloc[:,lower:upper]=np.nan
else:
ds.iloc[:,lower:upper]=0
else:
try:
for entries in scattercut:
if equal_energy_bin is not None:
scattercut=[scipy.constants.h*scipy.constants.c/(a*1e-9*scipy.constants.electron_volt) for a in scattercut]
scattercut=scattercut[::-1]
lower=find_nearest_index(x,entries[0])
upper=find_nearest_index(x,entries[1])
if drop_scatter:
ds.iloc[:,lower:upper]=np.nan
else:
ds.iloc[:,lower:upper]=0
except:
pass
ds=ds.dropna(axis=1)
#until here we always have the same matrix
ds.index.name=time_label
ds.columns.name=energy_label
if wavelength is not None:#ok we want to have singular wavelength
if not hasattr(wavelength,'__iter__'):wavelength=np.array([wavelength])
if len(wavelength)>1:wavelength.sort()
for i,wave in enumerate(wavelength):
upper=wave+wavelength_bin/2
lower=wave-wavelength_bin/2
if equal_energy_bin is not None and from_fit:
upper=scipy.constants.h*scipy.constants.c/(lower*1e-9*scipy.constants.electron_volt)
lower=scipy.constants.h*scipy.constants.c/(upper*1e-9*scipy.constants.electron_volt)
wave=scipy.constants.h*scipy.constants.c/(wave*1e-9*scipy.constants.electron_volt)
if i == 0:
out=ds.loc[:,lower:upper].mean(axis='columns').to_frame()
out.columns = [wave]
else:
if wave in out.columns:continue
out[wave] = ds.loc[:,lower:upper].mean(axis='columns')
out.columns=out.columns.astype('float')
out.columns.name=energy_label
out.index.name=time_label
ds=out
if times is not None: #ok we want to have single times
if not hasattr(times, '__iter__'):times=np.array([times])
if baseunit is None:baseunit = 'ps'
time_scale=ds.index.values
if time_width_percent>0:
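# average all time points between time*(1-time_width_percent/100) and time*(1+time_width_percent/100) instead of taking only the nearest point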
for i,time in enumerate(times):
if time<0:
limits = [find_nearest_index(time_scale,time+time*time_width_percent/100.),
find_nearest_index(time_scale,time-time*time_width_percent/100.)]
else:
limits = [find_nearest_index(time_scale,time-time*time_width_percent/100.),
find_nearest_index(time_scale,time+time*time_width_percent/100.)]
time_lower = time_scale[limits[0]]
time_upper = time_scale[limits[1]]
time_mean = (time_lower+time_upper)/2
if i == 0:
out=ds.iloc[limits[0]:limits[1],:].mean(axis='rows').to_frame()
out.columns = ['%.3g %s (%.3g - %.3g %s)'%(time_mean,baseunit,time_lower,time_upper,baseunit)]
else:
out['%.3g %s (%.3g - %.3g %s)'%(time_mean,baseunit,time_lower,time_upper,baseunit)]=ds.iloc[limits[0]:limits[1],:].mean(axis='rows').to_frame()
else:
for i,time in enumerate(times):
index=find_nearest_index(time_scale,time)
if i == 0:
out=ds.iloc[index,:].to_frame()
out.columns=['%.3g %s'%(time_scale[index],baseunit)]
else:
out['%.3g %s'%(time_scale[index],baseunit)]=ds.iloc[index,:]
out.columns.name=time_label
out.index.name=energy_label
ds=out
#ds.index.name='Wavelength in nm'
ds.fillna(value=0,inplace=True)#lets fill nan values with zero to catch problems
if equal_energy_bin is not None:
ds.sort_index(axis=1,inplace=True,ascending=False)
return ds
def plot2d(ds, ax = None, title = None, intensity_range = None, baseunit = 'ps', timelimits = None,
scattercut = None, bordercut = None, wave_nm_bin = None, ignore_time_region = None,
time_bin = None, log_scale = False, plot_type = 'symlog', lintresh = 1,
wavelength_bin = None, levels = 256, use_colorbar = True, cmap = None,
data_type = 'differential Absorption in $\mathregular{\Delta OD}$', equal_energy_bin = None, from_fit = False):
'''function for plotting matrix of TA data.
Parameters
---------------
ds : DataFrame
This dataframe contains the data to be plotted. It is copied and sliced into the
regions defined. The dataframe expects the time to be in Index and the wavelength/energy
to be in the columns. The spectra is plotted with a second (energy) axis
ax : None, matplotlib axis object optional
If None (Default) a new figure and axis are created, otherwise ax needs to be a Matplotlib Axis
data_type : str
this is the datatype and effectively the unit put on the intensity axis
(Default)'differential Absorption in $\mathregular{\Delta OD}$
title : None or str
title to be used on top of each plot
The (Default) None triggers self.filename to be used. Setting a specific title as string will
be used in all plots. To remove the title all together set an empty string with this command title=""
intensity_range : None, float or list [of two floats]
intensity_range is a general switch that governs what intensity range the plots show.
For the 1d plots this is the y-axis for the 2d-plots this is the colour scale.
This parameter recognizes three settings. If set to "None" (Default) this uses the minimum and
maximum of the data. A single value like in the example below and the intended use is the symmetric
scale while a list with two entries gives an asymmetric scale, e.g.
intensity_range=3e-3 is converted into intensity_range=[-3e-3,3e-3]
baseunit : str
baseunit is a neat way to change the unit on the time axis of the plots. (Default) 'ps', but they
can be frames or something similar. This changes only the label of the axis.
During the import there is the option to divide the numbers by a factor.
I have also used frames or fs as units. Important is that all time units will be labeled with
this unit.
timelimits : None or list (of 2 floats), optional
cut times at the low and high time limit. (Default) None uses the limits of measurement
Important: If either the background or the chirp is to be fit this must include the
time before zero! Useful: It is useful to work on different regions, starting with
the longest (then use the ta.Backgound function prior to fit) and expand from there
scattercut : None or iterable (of floats or other iterable, always pairs!), optional
intended to "cut" one or multiple scatter regions. (if (Default) None nothing
happens) If it is set the spectral region between the limits is set to zero.
Usage single region: [lower region limit,upper region limit],
use for multiple regions:[[lower limit 1,upper limit 1],[lower limit 2,upper limit 2],...]
bordercut : None or iterable (with two floats), optional
cut spectra at the low and high wavelength limit. (Default) None
uses the limits of measurement
wave_nm_bin : None or float, optional
rebins the original data into even intervals. If set to None the original data will be used.
If set to a width (e.g. 2nm), the wavelength axis will be divided into steps of this size
and the mean of all measurements in the interval is taken. The re-binning stops as soon as
the measured stepsize is wider than given here, then the original bins are used.
This function is particularly useful for spectrometer with non-linear dispersion,
like a prism in the infrared.
equal_energy_bin : None or float(optional)
if this is set the wave_nm_bin is ignored and the data is rebinned into equal energy bins (assuming the data is in nm).
If dual axis is on then the lower axis is energy and the upper is wavelength
ignore_time_region : None or list (of two floats or of lists), optional
cuts a time range between a low and a high limit out of the fits. (Default) None: nothing happens
The region will be removed during the fitting process (and will be missing in the fit-result
plots)
Usage single region: [lower region limit,upper region limit],
use for multiple regions:[[lower limit 1,upper limit 1],[lower limit 2,upper limit 2],...]
time_bin : None or int, optional
is dividing the points on the time-axis in even bins and averages the found values in between.
This is a hard approach that also affects the fits. I recommend using this carefully,
it is most useful for modulated data. A better choice for transient absorption that only
affects the kinetics is 'time_width_percent'
log_scale : bool, optional
If True, the 2D plot (matrix) is plotted with a pseudo logarithmic intensity scale. (Default) False.
This usually does not give good results unless the intensity scale is symmetric
plot_type : None or str
is a general setting that can influences what time axis will be used for the plots.
"symlog" (linear around zero and logarithmic otherwise) "lin" and "log" are valid options.
lintresh : float
The pseudo logarithmic range "symlog" is used for most time axes. Symlog plots a range around
time zero linearly and beyond this linear threshold 'lintresh' on a logarithmic scale. (Default) 1
wavelength_bin : float, optional
the width used in kinetics. (Default) None
levels : int, optional
how many different colours to use in the description. less makes for more contrast but less
intensity details (Default) 256
use_colorbar : bool, optional
if True (Default) a colour bar is added to the 2d plot for intensity explanation, switch
mostly used for creating multiple plots
cmap : None or matplotlib color map, optional
is a powerful variable that chooses the colour map applied for all plots. If set to
None (Default) then the self.cmap is used.
As standard I use the color map "jet" from matplotlib. There are a variety of colormaps
available that are very useful. Beside "jet", "viridis" is a good choice as it is well
visible under red-green blindness. Other useful maps are "prism" for high fluctuations
or diverging color maps like "seismic".
See https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html for a comprehensive
selection. In the code the colormaps are imported so if plot_func is imported as pf then
self.cmap=pf.cm.viridis sets viridis as the map to use. Internally the colors are chosen
with the "colm" function. The 2d plots require a continuous color map so if something
else is given 2d plots are shown automatically with "jet". For all of the 1d plots however
I first select a number of colors before each plot. If cmap is a continuous map then these
are sampled evenly over the colourmap. Manual iterables of colours
cmap=[(1,0,0),(0,1,0),(0,0,1),...] are also accepted, as are vectors or dataframes that
contain as rows the colors. There must be of course sufficient colors present for
the numbers of lines that will be plotted. So I recommend to provide at least 10 colours
(e.g. your university colors). Colours are always given as a list or tuple with RGB or RGBA
(with the last A being the alpha=transparency). All numbers are between 0 and 1.
If a list/vector/DataFrame is given for the colours they will be used in the order provided.
from_fit : bool, optional
this switch is needed to avoid re-slicing of the data on the spectral axis for equal energy bins
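Examples
------------
A minimal usage sketch (assuming a DataFrame "ds" with the times as index and the wavelengths as columns):
>>> fig = plot2d(ds, intensity_range = 3e-3, plot_type = 'symlog', lintresh = 1)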
'''
if cmap is None:
cmap=standard_map
elif not np.array([isinstance(cmap,type(cm.viridis)),isinstance(cmap,type(cm.jet)),isinstance(cmap,type(cm.Blues)),isinstance(cmap,type(cm.coolwarm)),isinstance(cmap,type(cm.terrain))]).any():#we must have a continuous colormap for the 2d plot, otherwise fall back to the standard map
cmap=standard_map
if ax is None:
ax_ori=False
fig,ax=plt.subplots(figsize=(10,6),dpi=100)
else:
ax_ori=True
fig=ax.get_images()
if timelimits is None:
timelimits=(ds.index.min(),ds.index.max())
ds = sub_ds(ds, scattercut = scattercut, bordercut = bordercut, timelimits = timelimits, wave_nm_bin = wave_nm_bin,
wavelength_bin = wavelength_bin, time_bin = time_bin, ignore_time_region = ignore_time_region,
drop_scatter = False, drop_ignore = False, equal_energy_bin = equal_energy_bin, from_fit = from_fit)
if intensity_range is None:
try:
maxim=max([abs(ds.values.min()),abs(ds.values.max())])
intensity_range=[-maxim,maxim]
except:
intensity_range=[-1e-2,1e-2]
else:
if not hasattr(intensity_range,'__iter__'):#lets have an lazy option
intensity_range=[-intensity_range,intensity_range]
else:
if log_scale:print('I highly recommend using a symmetric intensity range for the logarithmic scale, the colorbar might look strange otherwise')
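# build the colour normalisation: for log_scale a symmetric set of logarithmically spaced boundaries around zero is used, otherwise evenly spaced levels between the intensity limits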
if log_scale:
bounds0 = list(-1*np.logspace(np.log10(-intensity_range[0]), np.log10(-intensity_range[0]/(levels/2)), levels))
bounds1 = np.logspace(np.log10(intensity_range[1]/(levels/2)),np.log10(intensity_range[1]), levels)
bounds0.append(0)
for a in bounds1:
bounds0.append(a)
norm = colors.BoundaryNorm(boundaries=bounds0, ncolors=len(bounds0))
mid_color=colm(k=range(levels),cmap=cmap)[int((levels-levels%2)/2)]
#norm=colors.SymLogNorm(levels,linthresh=1e-5, linscale=1e-5,vmin=intensity_range[0], vmax=intensity_range[1])
else:
nbins=levels
levels = MaxNLocator(nbins=levels).tick_values(intensity_range[0], intensity_range[1])
norm = BoundaryNorm(levels,clip=True,ncolors=cmap.N)
mid_color_index=find_nearest_index(levels,0)
mid_color=colm(k=range(nbins),cmap=cmap)
mid_color=mid_color[mid_color_index]
#print(ds.head())
x = ds.columns.values.astype('float')
y = ds.index.values.astype('float')
X, Y = np.meshgrid(x, y)
img=ax.pcolormesh(X,Y,ds.values,norm=norm,cmap=cmap,shading=shading)
if ignore_time_region is None:
pass
elif isinstance(ignore_time_region[0], numbers.Number):
ds.index=ds.index.astype(float)
try:
upper=ds.loc[ignore_time_region[1]:,:].index.values.min()
lower=ds.loc[:ignore_time_region[0],:].index.values.max()
if equal_energy_bin is not None:
rect = plt.Rectangle((x.max(),lower), width=abs(ax.get_xlim()[0]-ax.get_xlim()[1]), height=abs(upper-lower),facecolor=mid_color,alpha=1)#mid_color)
else:
rect = plt.Rectangle((x.min(),lower), width=abs(ax.get_xlim()[1]-ax.get_xlim()[0]), height=abs(upper-lower),facecolor=mid_color,alpha=1)#mid_color)
ax.add_patch(rect)
except:
pass
else:
ignore_time_region_loc=flatten(ignore_time_region)
for k in range(int(len(ignore_time_region_loc)/2)):
try:
upper=ds.loc[ignore_time_region_loc[2*k+1]:,:].index.values.min()
lower=ds.loc[:ignore_time_region_loc[2*k],:].index.values.max()
if equal_energy_bin is not None:
rect = plt.Rectangle((x.max(),lower), width=abs(ax.get_xlim()[0]-ax.get_xlim()[1]), height=abs(upper-lower),facecolor=mid_color,alpha=1)
else:
rect = plt.Rectangle((x.min(),lower), width=abs(ax.get_xlim()[1]-ax.get_xlim()[0]), height=abs(upper-lower),facecolor=mid_color,alpha=1)
ax.add_patch(rect)
except:
pass
if scattercut is None:
pass
elif isinstance(scattercut[0], numbers.Number):
try:
if equal_energy_bin is not None:
scattercut=[scipy.constants.h*scipy.constants.c/(a*1e-9*scipy.constants.electron_volt) for a in scattercut]
scattercut=scattercut[::-1]
upper=ds.loc[:,scattercut[1]:].columns.values.min()
lower=ds.loc[:,:scattercut[0]].columns.values.max()
width=abs(upper-lower)
rect = plt.Rectangle((lower,y.min()), height=abs(ax.get_ylim()[1]-ax.get_ylim()[0]), width=width, facecolor=mid_color,alpha=1)#mid_color)
ax.add_patch(rect)
except:
pass
else:
scattercut=flatten(scattercut)
if equal_energy_bin is not None:
scattercut=[scipy.constants.h*scipy.constants.c/(a*1e-9*scipy.constants.electron_volt) for a in scattercut]
scattercut=scattercut[::-1]
for k in range(int(len(scattercut)/2)):
try:
upper=ds.loc[:,scattercut[2*k+1]:].columns.values.min()
if upper==0:raise
lower=ds.loc[:,:scattercut[2*k]].columns.values.max()
rect = plt.Rectangle((lower,y.min()), height=abs(ax.get_ylim()[1]-ax.get_ylim()[0]), width=abs(upper-lower),facecolor=mid_color,alpha=1)#mid_color)
ax.add_patch(rect)
except:
pass
if use_colorbar:
mid=(intensity_range[1]+intensity_range[0])/2
if log_scale:
values=[intensity_range[0],mid-abs(intensity_range[0]-mid)/10,mid,mid+abs(intensity_range[1]-mid)/10,intensity_range[1]]
else:
values=[intensity_range[0],intensity_range[0]+abs(intensity_range[0]-mid)/2,mid,intensity_range[1]-abs(intensity_range[1]-mid)/2,intensity_range[1]]
labels=['%.2g'%(a) for a in values]
labels[0]='<' + labels[0]
labels[-1]='>'+labels[-1]
cbar=plt.colorbar(img, ax=ax,ticks=values,pad=0.01)
cbar.ax.set_yticklabels(labels)
a=ax.yaxis.label
fontsize=a.get_fontsize()
fontsize-=4
if not data_type is None:#we use this as a switch to enable a flexible avoidance of the label setting.
if log_scale:
if ax_ori:cbar.set_label(data_type + '\nLog-scale', rotation=270,labelpad=20,fontsize=fontsize)
else:cbar.set_label(data_type + '\nLog-scale', rotation=270,labelpad=20,fontsize=fontsize)
else:
if ax_ori:cbar.set_label(data_type, rotation=270,labelpad=20,fontsize=fontsize)
else:cbar.set_label(data_type, rotation=270,labelpad=20,fontsize=fontsize)
if "symlog" in plot_type:
ax.plot(ax.get_xlim(),[lintresh,lintresh],'black',lw=0.5,alpha=0.3)
ax.plot(ax.get_xlim(),[-1.0*lintresh,-1.0*lintresh],'black',lw=0.5,alpha=0.3)
ax.plot(ax.get_xlim(),[0,0],'black',lw=0.5,alpha=0.6)
if 1:
ax.set_yscale('symlog', linthresh=lintresh)
locmaj = matplotlib.ticker.LogLocator(base=10.0, subs=(0.1,1.0,10.,1e2,1e3,1e4))
ax.yaxis.set_major_locator(locmaj)
locmin = matplotlib.ticker.LogLocator(base=10.0, subs=np.arange(0.1,1,0.1))
ax.yaxis.set_minor_locator(locmin)
ticks=list(ax.get_yticks())
ticks.append(lintresh)
[ticks.append(a) for a in [-0.3,-1,-2,-5,-10]]
ticks.sort()
if timelimits[1]>100:
ticks=np.array(ticks)
ticks=np.concatenate((ticks.clip(min=0.1),np.zeros(1),ticks.clip(max=-0.1,min=timelimits[0])),axis=0)
ax.set_yticks(ticks)
ax.set_ylim(y.min(),y.max())
elif "log" in plot_type:
lower_time=max(1e-6,timelimits[0])
ax.set_ylim(lower_time,y.max())
ax.set_yscale('log')
else:
ax.set_yscale('linear')
ax.set_ylim(timelimits)
if bordercut is not None:
try:
if equal_energy_bin is not None:
bordercut=[scipy.constants.h*scipy.constants.c/(a*1e-9*scipy.constants.electron_volt) for a in bordercut]
ax.set_xlim(bordercut[0],bordercut[1])
except:
print('bordercut failed')
pass
if equal_energy_bin is not None and False:
temp=np.array(ax.get_xlim())
ax.set_xlim(temp.max(),temp.min())
ax.set_xlabel(ds.columns.name)
ax.set_ylabel(ds.index.name)
if title:
ax.set_title(title)
if ax_ori:return ax
return fig
def plot2d_fit(re, error_matrix_amplification=5, use_images=True, patches=False, title = None,
intensity_range = None, baseunit = 'ps', timelimits = None,
scattercut = None, bordercut = None, wave_nm_bin = None, ignore_time_region = None,
time_bin = None, log_scale = False, scale_type = 'symlog', lintresh = 1,
wavelength_bin = None, levels = 256, plot_with_colorbar = True, cmap = None,
data_type = 'differential Absorption in $\mathregular{\Delta OD}$', equal_energy_bin = None):
'''Plots the fit output as a single figure with measured, fitted and difference matrices.
The difference is multiplied by error_matrix_amplification. patches moves the labels from the
title into white patches in the top of the figure.
Parameters
---------------
re : dict
Dictionary that contains the fit results and specific the dataframes A, AC and AE
error_matrix_amplification : int, optional
the error matrix AE is multiplied by this factor for the plot.
use_images : bool:
(Default)True converts the matrix into images, to reduce the filesize.
data_type : str
this is the datatype and effectively the unit put on the intensity axis
(Default)'differential Absorption in $\mathregular{\Delta OD}$
patches : bool, optional
If False (Default) the names "measured" "fitted" "difference" will be placed above the images.
If True, then they will be included into the image (denser)
title : None or str
title to be used on top of each plot
The (Default) None triggers self.filename to be used. Setting a specific title as string will
be used in all plots. To remove the title all together set an empty string with this command title=""
intensity_range : None, float or list [of two floats]
intensity_range is a general switch that governs what intensity range the plots show.
For the 1d plots this is the y-axis for the 2d-plots this is the colour scale.
This parameter recognizes three settings. If set to "None" (Default) this uses the minimum and
maximum of the data. A single value like in the example below and the intended use is the symmetric
scale while a list with two entries gives an asymmetric scale, e.g.
intensity_range=3e-3 is converted into intensity_range=[-3e-3,3e-3]
baseunit : str
baseunit is a neat way to change the unit on the time axis of the plots. (Default) 'ps', but they
can be frames or something similar. This changes only the label of the axis.
During the import there is the option to divide the numbers by a factor.
I have also used frames or fs as units. Important is that all time units will be labeled with
this unit.
timelimits : None or list (of 2 floats), optional
cut times at the low and high time limit. (Default) None uses the limits of measurement
Important: If either the background or the chirp is to be fit this must include the
time before zero! Useful: It is useful to work on different regions, starting with
the longest (then use the ta.Backgound function prior to fit) and expand from there
scattercut : None or iterable (of floats or other iterable, always pairs!), optional
intended to "cut" one or multiple scatter regions. (if (Default) None nothing
happens) If it is set the spectral region between the limits is set to zero.
Usage single region: [lower region limit,upper region limit],
use for multiple regions:[[lower limit 1,upper limit 1],[lower limit 2,upper limit 2],...]
bordercut : None or iterable (with two floats), optional
cut spectra at the low and high wavelength limit. (Default) None
uses the limits of measurement
wave_nm_bin : None or float, optional
rebins the original data into even intervals. If set to None the original data will be used.
If set to a width (e.g. 2nm), the wavelength axis will be divided into steps of this size
and the mean of all measurements in the interval is taken. The re-binning stops as soon as
the measured stepsize is wider than given here, then the original bins are used.
This function is particularly useful for spectrometer with non-linear dispersion,
like a prism in the infrared.
equal_energy_bin : None or float(optional)
if this is set the wave_nm_bin is ignored and the data is rebinned into equal energy bins (assuming the data is in nm).
If dual axis is on then the lower axis is energy and the upper is wavelength
ignore_time_region : None or list (of two floats or of lists), optional
cuts a time range between a low and a high limit out of the fits. (Default) None: nothing happens
The region will be removed during the fitting process (and will be missing in the fit-result
plots)
Usage single region: [lower region limit,upper region limit],
use for multiple regions:[[lower limit 1,upper limit 1],[lower limit 2,upper limit 2],...]
time_bin : None or int, optional
is dividing the points on the time-axis in even bins and averages the found values in between.
This is a hard approach that also affects the fits. I recommend using this carefully,
it is most useful for modulated data. A better choice for transient absorption that only
affects the kinetics is 'time_width_percent'
log_scale : bool, optional
If True, the 2D plot (matrix) is plotted with a pseudo logarithmic intensity scale. (Default) False.
This usually does not give good results unless the intensity scale is symmetric
scale_type : None or str
is a general setting that can influences what time axis will be used for the plots.
"symlog" (linear around zero and logarithmic otherwise) "lin" and "log" are valid options.
lintresh : float
The pseudo logarithmic range "symlog" is used for most time axes. Symlog plots a range around
time zero linearly and beyond this linear threshold 'lintresh' on a logarithmic scale. (Default) 1
wavelength_bin : float, optional
the width used in kinetics. (Default) None
levels : int, optional
how many different colours to use in the description. less makes for more contrast but less
intensity details (Default) 256
plot_with_colorbar : bool, optional
if True (Default) a colour bar is added to the 2d plot for intensity explanation, switch
mostly used for creating multiple plots
cmap : None or matplotlib color map, optional
is a powerful variable that chooses the colour map applied for all plots. If set to
None (Default) then the self.cmap is used.
As standard I use the color map "jet" from matplotlib. There are a variety of colormaps
available that are very useful. Beside "jet", "viridis" is a good choice as it is well
visible under red-green blindness. Other useful maps are "prism" for high fluctuations
or diverging color maps like "seismic".
See https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html for a comprehensive
selection. In the code the colormaps are imported so if plot_func is imported as pf then
self.cmap=pf.cm.viridis sets viridis as the map to use. Internally the colors are chosen
with the "colm" function. The 2d plots require a continuous color map so if something
else is given 2d plots are shown automatically with "jet". For all of the 1d plots however
I first select a number of colors before each plot. If cmap is a continuous map then these
are sampled evenly over the colourmap. Manual iterables of colours
cmap=[(1,0,0),(0,1,0),(0,0,1),...] are also accepted, as are vectors or dataframes that
contain as rows the colors. There must be of course sufficient colors present for
the numbers of lines that will be plotted. So I recommend to provide at least 10 colours
(e.g. your university colors). Colours are always given as a list or tuple with RGB or RGBA
(with the last A being the alpha=transparency). All numbers are between 0 and 1.
If a list/vector/DataFrame is given for the colours they will be used in the order provided.
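Examples
------------
A minimal usage sketch (assuming "ta" is a fitted object whose result dictionary is stored in ta.re):
>>> fig = plot2d_fit(ta.re, error_matrix_amplification = 5, patches = True)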
'''
if intensity_range is None:intensity_range=5e-3
fig,ax=plt.subplots(3,figsize=(9,11))
if patches:
plot2d(re['A'], cmap = cmap, log_scale = log_scale, intensity_range = intensity_range, ax = ax[0],
baseunit = baseunit, use_colorbar = plot_with_colorbar, levels = levels, plot_type = scale_type,
ignore_time_region = ignore_time_region, lintresh = lintresh, bordercut = bordercut,
scattercut = scattercut, timelimits = timelimits, data_type = data_type, equal_energy_bin = equal_energy_bin, from_fit = True)
plot2d(re['AC'], cmap = cmap, log_scale = log_scale, intensity_range = intensity_range, ax = ax[1],
baseunit = baseunit, use_colorbar = plot_with_colorbar, levels = levels, plot_type = scale_type,
ignore_time_region = ignore_time_region, lintresh = lintresh, bordercut = bordercut,
scattercut = scattercut, timelimits = timelimits, data_type = data_type, equal_energy_bin = equal_energy_bin, from_fit = True)
plot2d(re['AE'], cmap = cmap, log_scale = log_scale, intensity_range = np.array(intensity_range)/error_matrix_amplification, ax = ax[2],
baseunit = baseunit, use_colorbar = plot_with_colorbar, levels = levels, plot_type = scale_type,
ignore_time_region = ignore_time_region, lintresh = lintresh, bordercut = bordercut, scattercut = scattercut,
timelimits = timelimits, data_type = data_type, equal_energy_bin = equal_energy_bin, from_fit = True)
for i in range(3):
ax[i].set_title(label='')
stringen=['measured','calculated','difference']
x_width=(ax[i].get_xlim()[1]-ax[i].get_xlim()[0])/4
if 'lin' in scale_type:
y_width=(ax[i].get_ylim()[1])/8
else:
y_width=(ax[i].get_ylim()[1])/1.5
rect = plt.Rectangle((ax[i].get_xlim()[1]-x_width, ax[i].get_ylim()[1]-y_width), x_width, y_width,facecolor="white", alpha=0.5)
ax[i].add_patch(rect)
ax[i].text(ax[i].get_xlim()[1]-x_width+x_width*0.1,ax[i].get_ylim()[1]-y_width+y_width*0.1,stringen[i],fontsize=16)
fig.subplots_adjust(left=0.15, bottom=0.067, right=0.97, top=0.985, wspace=0.0, hspace=0.258)
else:
plot2d(re['A'], cmap = cmap, title = 'Measured', log_scale = log_scale, intensity_range = intensity_range,
ax = ax[0], baseunit = baseunit, use_colorbar = plot_with_colorbar, levels = levels, plot_type = scale_type,
ignore_time_region = ignore_time_region, lintresh = lintresh, bordercut = bordercut, scattercut = scattercut,
timelimits = timelimits, data_type = data_type, equal_energy_bin = equal_energy_bin, from_fit = True)
plot2d(re['AC'], cmap = cmap, title = 'Calculated', log_scale = log_scale, intensity_range = intensity_range,
ax = ax[1], baseunit = baseunit, use_colorbar = plot_with_colorbar, levels = levels, plot_type = scale_type,
ignore_time_region = ignore_time_region, lintresh = lintresh, bordercut = bordercut, scattercut = scattercut,
timelimits = timelimits , data_type = data_type, equal_energy_bin = equal_energy_bin, from_fit = True)
plot2d(re['AE'], cmap = cmap, title = 'Difference', log_scale = log_scale, intensity_range = np.array(intensity_range)/error_matrix_amplification,
ax = ax[2], baseunit = baseunit, use_colorbar = plot_with_colorbar, levels = levels, plot_type = scale_type,
ignore_time_region = ignore_time_region, lintresh = lintresh, bordercut = bordercut, scattercut = scattercut,
timelimits = timelimits, data_type = data_type, equal_energy_bin = equal_energy_bin, from_fit = True)
#fig.subplots_adjust(left=0.15, bottom=0.067, right=0.97, top=0.97, wspace=0.0, hspace=0.398)
fig.tight_layout()
return fig
def plot_fit_output( re, ds, cmap = standard_map, plotting = range(6), title = None, path = None, filename = None, f = 'standard',
intensity_range = 1e-2, baseunit = 'ps', timelimits = None, scattercut = None, bordercut = None,
error_matrix_amplification = 20, wave_nm_bin = 5, rel_wave = None, width = 10, rel_time = [1, 5, 10],
time_width_percent = 10, ignore_time_region = None, save_figures_to_folder = True, log_fit = False, mod = None,
subplot = False, color_offset = 0, log_scale = True, savetype = 'png', evaluation_style = False, lintresh = 1,
scale_type = 'symlog', patches = False, print_click_position = False,
data_type = 'differential Absorption in $\mathregular{\Delta OD}$', plot_second_as_energy = True, units = 'nm',
equal_energy_bin = None):
'''Purely manual function that plots all the fit output figures. Quite cumbersome,
but offers a lot of manual options. The figures can be called separately
or with a list of plots, e.g. range(6) calls plots 0-5. Manual plotting of a certain type:
This is a wrapper function that triggers the plotting of all the fitted plots.
The parameter in this plot call are to control the general look and features of the plot.
Which plots are printed is defined by the command (plotting)
The plots are generated from the fitted Matrixes and as such only will work after a fit was actually
completed (and the "re" dictionary attached to the object.)
In all plots the RAW data is plotted as dots and the fit with lines
*Contents of the plots*
0. DAC contains the assigned spectra for each component of the fit. For
a modelling with independent exponential decays this corresponds to
the "Decay Associated Spectra" (DAS). For all other models this
contains the "Species Associated Spectra" (SAS). According to the
model the separate spectra are labeled by time (process) or name, if
a name is associated in the fitting model. The spectra are shown in
the extracted strength in the right pane and normalized in the left.
Extracted strength means that the measured spectral strength is the
intensity (concentration matrix) times this spectral strength. As the
concentration maxima for all DAS are 1 this corresponds to the
spectral strength for the DAS. (please see the documentation for the
fitting algorithm for further details)
1. summed intensity. All wavelength of the spectral axis are summed for
data and fit. The data is plotted in a number of ways vs linear and
logarithmic axis. This plot is not ment for publication but very
useful to evaluate the quality of a fit.
2. plot kinetics for selected wavelength (see corresponding RAW plot)
3. plot spectra at selected times (see corresponding RAW plot)
4. plots the matrices (measured, modelled and error matrix). The parameters are
the same as used for the corresponding RAW plot with the addition of
"error_matrix_amplification" which is a scaling factor multiplied
onto the error matrix. I recommend playing with different "cmap",
"log_scale" and "intensity_range" to create a pleasing plot
5. concentrations. In the progress of the modelling/fitting a matrix is
generated that contains the relative concentrations of the species
modelled. This plot is showing the temporal development of these
species. Further details on how this matrix is generated can be found
in the documentation of the fitting function. The modeled spectra are
the convolution of these vectors (giving the time-development) and
the DAS/SAS (giving the spectral development).
Parameters
---------------
ds : DataFrame
This dataframe contains the data to be plotted. It is copied and sliced into the
regions defined. The dataframe expects the time to be in Index and the wavelength/energy
to be in the columns. The spectra is plotted with a second (energy) axis
re : dict
Dictionary that contains the fit results and specific the dataframes A, AC and AE
data_type : str
this is the datatype and effectively the unit put on the intensity axis
(Default)'differential Absorption in $\mathregular{\Delta OD}$
error_matrix_amplification : int, optional
the error matrix AE is multiplied by this factor for the plot.
plotting : int or iterable (of integers), optional
This parameter determines which figures are plotted
the figures can be called separately with plotting = 1
or with a list of plots (Default), e.g. plotting=range(6) calls plots 0,1,2,3,4,5
The plots have the following numbers:\
0 - DAS or SAS\
1 - summed intensity\
2 - Kinetics\
3 - Spectra\
4 - Matrixes\
5 - Concentrations (the c-object)\
The plotting takes all parameter from the "ta" object unless otherwise specified
path : None, str or path object, optional
This defines where the files are saved if the safe_figures_to_folder parameter is True,
quite useful if a lot of data sets are to be printed fast.
If a path is given, this is used. If a string like the (Default) "result_figures" is given,
then a subfolder of this name will be used (an generated if necessary)
relative to self.path. Use an empty string to use the self.path
If set to None, the location of the plot_func will be used and
a subfolder with title "result_figures" be generated here
savetype : str or iterable (of str), optional
matplotlib allows the saving of figures in various formats. (Default) "png",
typical and recommendable options are "svg" and "pdf".
evaluation_style : bool, optional
True (Default = False) adds a lot of extra information in the plot
title : None or str, optional
"title=None" is in general the filename that was loaded. Setting a
specific title will be used in all plots. To remove the title all
together set an empty string with title=""
scale_type : str, optional
refers to the time-axis and takes, ’symlog’ (Default)(linear around zero and logarithmic otherwise)
and ‘lin’ for linear and ‘log’ for logarithmic, switching all the time axis to this type
patches : bool, optional
If False (Default) the names "measured" "fitted" "difference" will be placed above the images.
If True, then they will be included into the image (denser)
filename : str, optional
offers to replace the base-name used for all plots (to e.g. specify what sample was used).
if (Default) None is used, the self.filename is used as a base name. The filename plays only a
role during saving, as does the path and savetype
save_figures_to_folder : bool, optional
(Default) is True, if True the Figures are automatically saved
log_scale : bool, optional
If True (Default), The 2D plots (Matrix) is plotted with a pseudo logarithmic intensity scale.
This usually does not give good results unless the intensity scale is symmetric
subplot : bool, optional
If False (Default) axis labels and such are set. If True, we plot into the same axis and
do not set labels
color_offset : int, optional
At the (Default) 0 the colours are chose from the beginning, for a larger value Color_offset
colors are skipped. Usually only used if multiple plots are created, and the data/or fit is
only shown for some of them.
lintresh : float
The pseudo logarithmic range "symlog" is used for most time axes. Symlog plots a range around
time zero linearly and beyond this linear threshold 'lintresh' on a logarithmic scale. (Default) 1
rel_time : float or list/vector (of floats), optional
For each entry in rel_time a spectrum is plotted. If time_width_percent=0 (Default) the
nearest measured timepoint is chosen. For other values see 'time_width_percent'
time_width_percent : float
"rel_time" and "time_width_percent" work together for creating spectral plots at
specific timepoints. For each entry in rel_time a spectrum is plotted.
If however e.g. time_width_percent=10 the region between the timepoint closest
to the 1.1 x timepoint and 0.9 x timepoint is averaged and shown
(and the legend adjusted accordingly). This is particularly useful for the densely
sampled region close to t=0. Typically, for logarithmically recorded kinetics, the
timepoints at later times will be further apart than 10 percent of the value,
but this allows to elegantly combine values around time=0 for better statistics.
This averaging is only applied for the plotting function and not for the fits.
ignore_time_region : None or list (of two floats or of lists), optional
cuts a time range between a low and a high limit out of the fits. (Default) None: nothing happens
The region will be removed during the fitting process (and will be missing in the fit-result
plots)
Usage single region: [lower region limit,upper region limit],
use for multiple regions:[[lower limit 1,upper limit 1],[lower limit 2,upper limit 2],...]
width : float, optional
the width used in kinetics, see below (Default) 10nm
rel_wave : float or list (of floats), optional
'rel_wave' and 'width' (in the object called 'wavelength_bin' work together for the creation
of kinetic plots. When plotting kinetic spectra one line will be plotted for each entry
in the list/vector rel_wave. During object generation the vector np.arange(300,1000,100)
is set as standard. Another typical using style would be to define a list of interesting
wavelength at which a kinetic development is to be plotted. At each selected wavelength
the data between wavelength+ta.wavelength_bin and wavelength-ta.wavelength_bin is averaged
for each timepoint returned
timelimits : None or list (of 2 floats), optional
cut times at the low and high time limit. (Default) None uses the limits of measurement
Important: If either the background or the chirp is to be fit this must include the
time before zero! Useful: It is useful to work on different regions, starting with
the longest (then use the ta.Backgound function prior to fit) and expand from there
scattercut : None or iterable (of floats or other iterable, always pairs!), optional
intended to "cut" one or multiple scatter regions. (if (Default) None nothing
happens) If it is set the spectral region between the limits is set to zero.
Usage single region: [lower region limit,upper region limit],
use for multiple regions:[[lower limit 1,upper limit 1],[lower limit 2,upper limit 2],...]
bordercut : None or iterable (with two floats), optional
cut spectra at the low and high wavelength limit. (Default) None
uses the limits of measurement
wave_nm_bin : None or float, optional
rebins the original data into even intervals. If set to None the original data will be used.
If set to a width (e.g. 2nm), the wavelength axis will be divided into steps of this size
and the mean of all measurements in the interval is taken. The re-binning stops as soon as
the measured stepsize is wider than given here, then the original bins are used.
This function is particularly useful for spectrometer with non-linear dispersion,
like a prism in the infrared.
equal_energy_bin : None or float(optional)
if this is set the wave_nm_bin is ignored and the data is rebinned into equal energy bins (based upon that the data is in nm.
If dual axis is on then the lower axis is energy and the upper is wavelength
intensity_range : None, float or list [of two floats]
intensity_range is a general switch that governs what intensity range the plots show.
For the 1d plots this is the y-axis for the 2d-plots this is the colour scale.
This parameter recognizes three settings. If set to "None" (Default) this uses the minimum and
maximum of the data. A single value like in the example below and the intended use is the symmetric
scale while a list with two entries gives an asymmetric scale, e.g.
intensity_range=3e-3 is converted into intensity_range=[-3e-3,3e-3]
baseunit : str
baseunit is a neat way to change the unit on the time axis of the plots. (Default) 'ps', but they
can be frames or something similar. This changes only the label of the axis.
During the import there is the option to divide the numbers by a factor.
I have also used frames or fs as units. Important is that all time units will be labeled with
this unit.
f : str
f is a replacement title that is set instead of the title. mainly used to have some options
(Default) is 'standard'
log_fit : bool, optional
(default)= False Used for legend generation, tells if the fit was in log or lin space
mod : str, optional
Used for legend generation, tells what model was used for fitting
cmap : None or matplotlib color map, optional
is a powerful variable that chooses the colour map applied for all plots. If set to
None (Default) then the self.cmap is used.
As standard I use the color map "jet" from matplotlib. There are a variety of colormaps
available that are very useful. Beside "jet", "viridis" is a good choice as it is well
visible under red-green blindness. Other useful maps are "prism" for high fluctuations
or diverging color maps like "seismic".
See https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html for a comprehensive
selection. In the code the colormaps are imported so if plot_func is imported as pf then
self.cmap=pf.cm.viridis sets viridis as the map to use. Internally the colors are chosen
with the "colm" function. The 2d plots require a continuous color map so if something
else is given 2d plots are shown automatically with "jet". For all of the 1d plots however
I first select a number of colors before each plot. If cmap is a continuous map then these
are sampled evenly over the colourmap. Manual iterables of colours
cmap=[(1,0,0),(0,1,0),(0,0,1),...] are also accepted, as are vectors or dataframes that
contain as rows the colors. There must be of course sufficient colors present for
the numbers of lines that will be plotted. So I recommend to provide at least 10 colours
(e.g. your university colors). Colours are always given as a list or tuple with RGB or RGBA
(with the last A being the alpha=transparency). All numbers are between 0 and 1.
If a list/vector/DataFrame is given for the colours they will be used in the order provided.
print_click_position : bool, optional
if True then the click position is printed for the spectral plots
Examples
------------
>>> ta.plot_fit_output(ta.re,ta.ds)
'''
if baseunit != 'ps':
if baseunit == 'ns':baseunit = 'Time in ns'
re['A'].index.name=baseunit
re['AC'].index.name=baseunit
re['AE'].index.name=baseunit
ds.index.name=baseunit
re['c'].index.name=baseunit
if width is None:width=wave_nm_bin
stringen=[]
timedf=re['fit_results_times']
if mod is not None:
if not isinstance(mod,type('hello')):mod='ext. func.'
if evaluation_style:
if mod is not None:
stringen.append('Fit with Model: %s'%mod)
timedf.rename(index={'resolution': "res"},inplace=True)
timedf.rename(columns={'init_value': "init"},inplace=True)
try:
stringen.append(timedf.to_string(columns = ['value','init','vary','min','max','expr'],
float_format = '{:.3g}'.format, justify = 'center'))
except:
print('something strange happened, most likely one value went "inf" or is set unexpectedly to None')
else:
if mod is not None:
if mod in ['paral','exponential']:stringen.append('Fit with ind.\nexpon. decays:')
else:stringen.append('Fit with time parameters:')
try:
timedf.drop(index=['resolution','t0'],inplace=True)
except:
pass
stringen.append(timedf.to_string(columns=['value'],float_format='{:.3g}'.format,header=False))
stringen='\n'.join(stringen)
times=timedf[timedf.is_rate].loc[:,'value'].values
time_string=timedf[timedf.is_rate].to_string(columns=['value'],float_format='{:.3g}'.format,header=False)
if not hasattr(plotting,'__iter__'):plotting=[plotting]
if 0 in plotting:#DAC
#-------plot DAC------------
#for i,col in enumerate(re['DAC']):
#re['DAC'].iloc[:,i]=re['DAC'].iloc[:,i].values*re['c'].max().iloc[i]
fig1,(ax1a,ax1b,ax1c)=plt.subplots(1,3,figsize=(12,5),dpi=100)
n_colors=len(re['DAC'].columns)
DAC=re['DAC']
DAC_copy=DAC.copy()
normed=(DAC/DAC.abs().max())
for i,col in enumerate(DAC_copy):
DAC_copy.iloc[:,i]=DAC_copy.iloc[:,i].values*re['c'].max().iloc[i]
if scattercut is None:
DAC.plot(ax=ax1b,color=colm(range(n_colors),cmap=cmap))
normed.plot(ax=ax1a,color=colm(range(n_colors),cmap=cmap))
DAC_copy.plot(ax=ax1c,color=colm(range(n_colors),cmap=cmap))
elif isinstance(scattercut[0], numbers.Number):
DAC.loc[:scattercut[0],:].plot(ax=ax1b,color=colm(range(n_colors),cmap=cmap))
DAC.loc[scattercut[1]:,:].plot(ax=ax1b,color=colm(range(n_colors),cmap=cmap), label='_nolegend_')
normed.loc[:scattercut[0],:].plot(ax=ax1a,color=colm(range(n_colors),cmap=cmap))
normed.loc[scattercut[1]:,:].plot(ax=ax1a,color=colm(range(n_colors),cmap=cmap), label='_nolegend_')
DAC_copy.loc[:scattercut[0],:].plot(ax=ax1c,color=colm(range(n_colors),cmap=cmap))
DAC_copy.loc[scattercut[1]:,:].plot(ax=ax1c,color=colm(range(n_colors),cmap=cmap), label='_nolegend_')
else:
try:
scattercut=flatten(scattercut)
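				# scattercut has been flattened to [low1, high1, low2, high2, ...];
				# the loop plots the spectral segments before, between and after the cut regions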
				for i in range(len(scattercut)//2+1):
if i==0:
DAC.loc[:scattercut[0],:].plot(ax=ax1b,color=colm(range(n_colors),cmap=cmap))
normed.loc[:scattercut[0],:].plot(ax=ax1a,color=colm(range(n_colors),cmap=cmap))
DAC_copy.loc[:scattercut[0],:].plot(ax=ax1c,color=colm(range(n_colors),cmap=cmap))
elif i<(len(scattercut)/2):
DAC.loc[scattercut[2*i-1]:scattercut[2*i],:].plot(ax=ax1b,color=colm(range(n_colors),cmap=cmap), label='_nolegend_')
normed.loc[scattercut[2*i-1]:scattercut[2*i],:].plot(ax=ax1a,color=colm(range(n_colors),cmap=cmap), label='_nolegend_')
DAC_copy.loc[scattercut[2*i-1]:scattercut[2*i],:].plot(ax=ax1c,color=colm(range(n_colors),cmap=cmap), label='_nolegend_')
else:
DAC.loc[scattercut[-1]:,:].plot(ax=ax1b,color=colm(range(n_colors),cmap=cmap), label='_nolegend_')
normed.loc[scattercut[-1]:,:].plot(ax=ax1a,color=colm(range(n_colors),cmap=cmap), label='_nolegend_')
DAC_copy.loc[scattercut[-1]:,:].plot(ax=ax1c,color=colm(range(n_colors),cmap=cmap), label='_nolegend_')
except:
DAC.plot(ax=ax1b,color=colm(range(n_colors),cmap=cmap))
normed.plot(ax=ax1a,color=colm(range(n_colors),cmap=cmap))
DAC_copy.plot(ax=ax1c,color=colm(range(n_colors),cmap=cmap))
if mod in ['paral','exponential']:
try:
names=['decay %i: %.3g %s'%(i,a,baseunit) for i,a in enumerate(times)]
except:
print('something strange happened, most likely one value went "inf" or is set unexpectedly to None')
names=['decay %i: %s %s'%(i,a,baseunit) for i,a in enumerate(times)]
if 'background' in list(re['DAC'].columns):names.append('background')
if 'Non Decaying' in list(re['DAC'].columns):names.append('Non Decaying')
ax1a.legend(names,title='Model: {}'.format(mod))
ax1b.legend(names,title='Model: {}'.format(mod))
ax1c.legend(names,title='Model: {}'.format(mod))
elif mod in ['exp']:
names=['species %i'%i for i,a in enumerate(re['DAC'].columns.values)]
if 'background' in list(re['DAC'].columns):
if 'Non Decaying' in list(re['DAC'].columns):
names[-1]='background'
names[-2]='Non Decaying'
else:
names[-1]='background'
else:
if 'Non Decaying' in list(re['DAC'].columns):
names[-1]='Non Decaying'
ax1a.legend(names,title='Model: {}'.format(mod))
ax1b.legend(names,title='Model: {}'.format(mod))
ax1c.legend(names,title='Model: {}'.format(mod))
else:
names=['%s'%a for a in re['DAC'].columns.values]
ax1a.legend(names,title='Model: {}'.format(mod))
ax1b.legend(names,title='Model: {}'.format(mod))
ax1c.legend(names,title='Model: {}'.format(mod))
#ax1c.legend(time_string,title='Model: {}'.format(mod))
if title is None:
ax1a.set_title(f)
else:
if len(title)>0:
ax1a.set_title(title)
if title is None:
ax1b.set_title(f)
else:
if len(title)>1:ax1b.set_title(title)
if title is None:
ax1c.set_title(f)
else:
if len(title)>1:ax1c.set_title(title)
ax1a.plot(ax1a.get_xlim(),[0,0],'black',zorder=10)
ax1b.plot(ax1b.get_xlim(),[0,0],'black',zorder=10)
		ax1c.plot(ax1c.get_xlim(),[0,0],'black',zorder=10)
ax1a.set_xlabel(ds.columns.name)
ax1b.set_xlabel(ds.columns.name)
ax1c.set_xlabel(ds.columns.name)
ax1a.set_ylabel('intensity norm.')
ax1b.set_ylabel('intensity in arb. units')
ax1c.set_ylabel('intensity*max(c) in arb. units')
fig1.tight_layout()
if 1 in plotting: #-------plot sum_sum------------
fig2 = plt.figure(figsize = (18, 5), dpi = 100)
ax2a=[plt.subplot2grid((3, 3), (0, i)) for i in range(3)]
ax2=[plt.subplot2grid((3, 3), (1, i), rowspan=2) for i in range(3)]
dat = [pandas.DataFrame(re['A'], index = re['A'].index, columns = re['A'].columns).abs().sum(axis = 1)]
dat.append(pandas.DataFrame(re['AC'], index = re['AC'].index, columns = re['AC'].columns).abs().sum(axis = 1))
dat.append(pandas.DataFrame(re['AE'], index = re['AE'].index, columns = re['AE'].columns).abs().sum(axis = 1))
dat_names=['measured','calculated','error']
dat_styles=['*','-','-']
dat_cols=colm(range(3), cmap = cmap)
limits = (dat[0].min(), dat[0].max())
xlimits = (dat[0].index.min(), dat[0].index.max())
if ignore_time_region is None:
for i in range(3):
for j in range(3):
if i==2:
_ = dat[i].plot(ax = ax2a[j], label = dat_names[i], style = dat_styles[i], color = dat_cols[i])
else:
_ = dat[i].plot(ax = ax2[j], label = dat_names[i], style = dat_styles[i], color = dat_cols[i])
elif isinstance(ignore_time_region[0], numbers.Number):
x=dat[0].index.values.astype('float')
lower=find_nearest_index(x,ignore_time_region[0])
upper=find_nearest_index(x,ignore_time_region[1])
for i in range(3):
for j in range(3):
if i==2:
_ = dat[i].iloc[:lower].plot(ax = ax2a[j], label = dat_names[i], style = dat_styles[i], color = dat_cols[i])
_ = dat[i].iloc[upper:].plot(ax = ax2a[j], label = '_nolegend_', style = dat_styles[i], color = dat_cols[i])
else:
_ = dat[i].iloc[:lower].plot(ax = ax2[j], label = dat_names[i], style = dat_styles[i], color = dat_cols[i])
_ = dat[i].iloc[upper:].plot(ax = ax2[j], label = '_nolegend_', style = dat_styles[i], color = dat_cols[i])
else:
try:
ignore_time_region_loc=flatten(ignore_time_region)
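				# ignore_time_region_loc is the flattened list [low1, high1, ...];
				# plot the pieces of the traces that lie outside the ignored time windows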
				for k in range(len(ignore_time_region_loc)//2+1):
if k==0:
for i in range(3):
for j in range(3):
if i==2:
_ = dat[i].loc[:ignore_time_region_loc[k]].plot(ax = ax2a[j], label = dat_names[i], style = dat_styles[i], color = colm(i, cmap = cmap))
else:
_ = dat[i].loc[:ignore_time_region_loc[k]].plot(ax = ax2[j], label = dat_names[i], style = dat_styles[i], color = colm(i, cmap = cmap))
					elif k<(len(ignore_time_region_loc)//2):
for i in range(3):
for j in range(3):
if i==2:
_ = dat[i].loc[ignore_time_region_loc[2*k-1]:ignore_time_region_loc[2*k]].plot(ax = ax2a[j], label = '_nolegend_', style = dat_styles[i], color = colm(i, cmap = cmap))
else:
_ = dat[i].loc[ignore_time_region_loc[2*k-1]:ignore_time_region_loc[2*k]].plot(ax = ax2[j], label = '_nolegend_', style = dat_styles[i], color = colm(i, cmap = cmap))
else:
for i in range(3):
for j in range(3):
if i==2:
_ = dat[i].loc[ignore_time_region_loc[-1]:].plot(ax = ax2a[j], label = '_nolegend_', style = dat_styles[i], color = colm(i, cmap = cmap))
else:
_ = dat[i].loc[ignore_time_region_loc[-1]:].plot(ax = ax2[j], label = '_nolegend_', style = dat_styles[i], color = colm(i, cmap = cmap))
except:
for i in range(3):
for j in range(3):
if i==2:
_ = dat[i].plot(ax = ax2a[j], label = dat_names[i], style = dat_styles[i], color = colm(i, cmap = cmap))
else:
_ = dat[i].plot(ax = ax2[j], label = dat_names[i], style = dat_styles[i], color = colm(i, cmap = cmap))
ax2[0].set_xlim(xlimits)
ax2[0].set_xscale('symlog', linscale=0.1)
ax2[0].autoscale(axis='y', tight=True)
ax2a[0].set_xlim(xlimits)
ax2a[0].set_xscale('symlog', linscale=0.1)
ax2a[0].autoscale(axis='y', tight=True)
ax2[1].set_xlim(xlimits)
ax2[1].set_xscale('linear')
ax2[1].set_ylim(np.nanmax([limits[0],limits[1]/10000]),limits[1])
ax2[1].set_yscale('log')
ax2a[1].set_xlim(xlimits)
ax2a[1].set_xscale('linear')
ax2[2].set_xscale('log')
ax2[2].set_ylim(np.nanmax([limits[0],limits[1]/10000]),limits[1])
ax2[2].set_yscale('log')
ax2[2].set_xlim(max(0.1, xlimits[0]), xlimits[1])
ax2a[2].set_xscale('log')
ax2a[2].set_xlim(max(0.1, xlimits[0]), xlimits[1])
#draw a black line at zero
ax2a[0].plot(ax2[0].get_xlim(), [0, 0], 'black', zorder=10, label = '_nolegend_')
ax2a[1].plot(ax2[0].get_xlim(), [0, 0], 'black', zorder=10, label = '_nolegend_')
ax2a[2].plot(ax2[0].get_xlim(), [0, 0], 'black', zorder=10, label = '_nolegend_')
#plot empty to get the labels right
ax2[0].plot([], [], ' ', label=stringen)
ax2[0].legend(title='Model: {}'.format(mod),frameon=False)
if title is None:
ax2[1].legend(labels=[],title=f,frameon=False)
else:
if not len(title)==0:
ax2[1].legend(labels=[],title=title,frameon=False)
for t in times:
if isinstance(t,float):
ax2[0].plot([t,t],ax2[0].get_ylim(),lw=0.5,zorder=10,alpha=0.5,color='black')
ax2[1].plot([t,t],ax2[1].get_ylim(),lw=0.5,zorder=10,alpha=0.5,color='black')
ax2[2].plot([t,t],ax2[2].get_ylim(),lw=0.5,zorder=10,alpha=0.5,color='black')
x_label=ds.index.name
ax2[0].set_xlabel(x_label)
ax2[1].set_xlabel(x_label)
ax2[2].set_xlabel(x_label)
ax2a[0].set_xlabel(x_label)
ax2a[1].set_xlabel(x_label)
ax2a[2].set_xlabel(x_label)
fig2.tight_layout()
if 2 in plotting:#---plot single wavelength----------
fig3,ax3 = plt.subplots(figsize = (15,6),dpi = 100)
#fig3,ax3 = plt.subplots(figsize = (8,4),dpi = 100)
_=plot1d( ds = re['AC'], cmap = cmap, ax = ax3, width = width, wavelength = rel_wave,
lines_are = 'fitted', plot_type = scale_type, baseunit = baseunit, lintresh = lintresh,
timelimits = timelimits, text_in_legend = time_string, title = '',
ignore_time_region = ignore_time_region, data_type = data_type, units = units, from_fit = True)
_=plot1d( ds = re['A'], cmap = cmap,ax = ax3, subplot = True, width = width,
wavelength = rel_wave,lines_are = 'data', plot_type = scale_type,
baseunit = baseunit , lintresh = lintresh , timelimits = timelimits,
ignore_time_region = ignore_time_region, data_type = data_type, units = units, from_fit = True)
ax3.autoscale(axis = 'y',tight = True)
for t in times:
if isinstance(t, float):
ax3.plot([t, t], ax3.get_ylim(), lw = 0.5, zorder = 10, alpha = 0.5, color = 'black')
fig3.tight_layout()
if 3 in plotting: #---plot at set time----------
fig4, ax4 = plt.subplots(figsize = (15, 6), dpi = 100)
_=plot_time(ds=re['A'],cmap=cmap,ax=ax4,subplot=False, rel_time=rel_time, title=title, baseunit=baseunit,
time_width_percent=time_width_percent, lines_are='data', scattercut=scattercut,
bordercut = bordercut, intensity_range = intensity_range, data_type = data_type,
plot_second_as_energy = plot_second_as_energy, units = units, equal_energy_bin = equal_energy_bin, from_fit = True )
_=plot_time(ds=re['AC'],cmap=cmap,ax=ax4, subplot=False, rel_time=rel_time, title=title,
baseunit=baseunit, time_width_percent=time_width_percent, lines_are='fitted',
color_offset=color_offset, scattercut=scattercut, data_type = data_type,
plot_second_as_energy = plot_second_as_energy, units = units, equal_energy_bin = equal_energy_bin, from_fit = True )
try:
if equal_energy_bin is not None:
bordercut=[scipy.constants.h*scipy.constants.c/(a*1e-9*scipy.constants.electron_volt) for a in bordercut]
ax4.set_xlim(bordercut)
except:
pass
if print_click_position:
plt.connect('button_press_event', mouse_move)
fig4.tight_layout()
if 4 in plotting:#---matrix with fit and error, as figures----------
fig5 = plot2d_fit(re, cmap = cmap, intensity_range = intensity_range, baseunit = baseunit,
error_matrix_amplification = error_matrix_amplification, wave_nm_bin = None, equal_energy_bin = equal_energy_bin,
use_images = True, log_scale = log_scale, scale_type = scale_type, patches = patches,
lintresh = lintresh, bordercut = bordercut, ignore_time_region = ignore_time_region,
scattercut = scattercut, timelimits = timelimits, levels = 200, data_type = data_type)
plt.ion()
plt.show()
if 5 in plotting:
fig6=plt.figure(figsize=(12,6))
G = GridSpec(4, 4)
ax6=[]
ax6.append(fig6.add_subplot(G[1:,0]))
ax6.append(fig6.add_subplot(G[1:,1]))
ax6.append(fig6.add_subplot(G[1:,2]))
ax6.append(fig6.add_subplot(G[1:,3]))
ax6.append(fig6.add_subplot(G[0,2:]))
ax6.append(fig6.add_subplot(G[0,0:2]))
n_colors=len(re['c'].columns)
for i in range(4):
ax6[i]=re['c'].plot(ax=ax6[i],color=colm(range(n_colors),cmap=cmap),legend=False)
if re['c'].index.name == 'time':
for i in range(4):
ax6[i].set_xlabel('time in %s'%baseunit)
ax6[1].set_yscale('log')
ax6[1].set_ylim(1e-5,1.1)
ax6[2].set_yscale('log')
ax6[2].set_ylim(1e-5,1.1)
ax6[2].set_xscale('log')
ax6[2].set_xlim(0.05,ax6[2].get_xlim()[1])
ax6[3].set_xscale('log')
ax6[3].set_xlim(0.05,ax6[3].get_xlim()[1])
handles, labels = ax6[3].get_legend_handles_labels()
ax6[4].axis('off')
ax6[5].axis('off')
if title is None:
title=f
else:
if not len(title)==0:
title=title
else:
title=''
if len(handles)<5:
ncol=2
elif len(handles)<7:
ncol=3
else:
ncol=4
leg=ax6[4].legend(handles,labels,loc=3, ncol=ncol,edgecolor=(0,0,0,1),framealpha=1,frameon=True,title=title)
for i in range(6):
ax6[i].set_title('')
ax6[5].text(0,0,'This factor represents the temporal evolution\n of the components in the fit.\nThis time dependent factor multiplied with the \nspectral intensity from the SAS/DAS is re[\"AC\"]',fontsize=float(plt.rcParams['legend.fontsize'])-1)
fig6.tight_layout()
if 6 in plotting:#---matrix with fit and error, as contours----------
fig7 = plot2d_fit(re, cmap = cmap, intensity_range = intensity_range, baseunit = baseunit,
error_matrix_amplification = error_matrix_amplification, wave_nm_bin = wave_nm_bin,
use_images = False, scale_type = scale_type, data_type = data_type)
plt.ion()
plt.show()
if save_figures_to_folder:
if path is None:path=os.path.dirname(os.path.realpath(__file__))
figure_path=check_folder(path=path)
if filename is None:
filename='test.fig'
fi=filename.split('.')[0]
try:
fig1.savefig(check_folder(path=figure_path,filename='%s_DAC.%s'%(fi,savetype)),bbox_inches='tight')
fig2.savefig(check_folder(path=figure_path,filename='%s_SUM.%s'%(fi,savetype)),bbox_inches='tight')
fig3.savefig(check_folder(path=figure_path,filename='%s_SEL.%s'%(fi,savetype)),bbox_inches='tight')
fig4.savefig(check_folder(path=figure_path,filename='%s_SPEC.%s'%(fi,savetype)),bbox_inches='tight')
fig5.savefig(check_folder(path=figure_path,filename='%s_FIG_MAT.%s'%(fi,savetype)),bbox_inches='tight')
fig6.savefig(check_folder(path=figure_path,filename='%s_concentrations.%s'%(fi,savetype)),bbox_inches='tight')
fig7.savefig(check_folder(path=figure_path,filename='%s_CONTOUR.%s'%(fi,savetype)),bbox_inches='tight')
except:
pass
def plot_raw(ds = None, plotting = range(4), title = None, intensity_range = 1e-2, baseunit = 'ps',
timelimits = None, scattercut = None, bordercut = None, wave_nm_bin = None, width = 10,
rel_wave = np.arange(400, 900, 100), rel_time = [1, 5, 10], time_width_percent = 10,
ignore_time_region = None, time_bin = None, cmap = None, color_offset = 0,
log_scale = True, plot_type = 'symlog', lintresh = 0.3, times = None,
save_figures_to_folder = False, savetype = 'png', path = None, filename = None,
print_click_position = False, data_type = 'differential Absorption in $\mathregular{\Delta OD}$',
plot_second_as_energy = True, units = 'nm', return_plots = False, equal_energy_bin = None):
'''This is the extended plot function, for convenient object based plotting see TA.Plot_RAW
	This function creates various RAW (non-fitted) plots. Based on the DataFrame ds a number of
cuts are created using the shaping parameters explained below.
In all plots the RAW data is plotted as dots and interpolated with lines
(using Savitzky-Golay window=5, order=3 interpolation).
Parameters
---------------
ds : DataFrame
This dataframe contains the data to be plotted. It is copied and sliced into the
regions defined. The dataframe expects the time to be in Index and the wavelength/energy
to be in the columns. The spectra is plotted with a second (energy) axis
plotting : int or iterable (of integers), optional
This parameter determines which figures are plotted
the figures can be called separately with plotting = 1
or with a list of plots (Default) e.g.plotting=range(4) calls plots 0,1,2,3
The plots have the following numbers:
0. Matrix
1. Kinetics
2. Spectra
3. SVD
title : None or str
title to be used on top of each plot
The (Default) None triggers self.filename to be used. Setting a specific title as string will
be used in all plots. To remove the title all together set an empty string with this command title=""
intensity_range : None, float or list [of two floats]
intensity_range is a general switch that governs what intensity range the plots show.
		For the 1d plots this is the y-axis, for the 2d plots this is the colour scale.
		This parameter recognizes three settings. If set to "None" (Default) the minimum and
		maximum of the data are used. A single value (like in the example below, which is the intended use)
		gives a symmetric scale, while a list with two entries gives an asymmetric scale, e.g.
intensity_range=3e-3 is converted into intensity_range=[-3e-3,3e-3]
data_type : str
this is the datatype and effectively the unit put on the intensity axis
		(Default) 'differential Absorption in $\mathregular{\Delta OD}$'
baseunit : str
		baseunit is a neat way to change the unit on the time axis of the plots. (Default) "ps", but it
		can be frames or something similar. This changes only the label of the axis.
		During the import there is the option to divide the numbers by a factor.
		I have also used frames or fs as units. Importantly, all time units will be labeled with
		this unit.
timelimits : None or list (of 2 floats), optional
cut times at the low and high time limit. (Default) None uses the limits of measurement
Important: If either the background or the chirp is to be fit this must include the
time before zero! Useful: It is useful to work on different regions, starting with
		the longest (then use the ta.Background function prior to fit) and expand from there
scattercut : None or iterable (of floats or other iterable, always pairs!), optional
		intended to "cut" one or multiple scatter regions. (if (Default) None nothing
happens) If it is set the spectral region between the limits is set to zero.
Usage single region: [lower region limit,upper region limit],
use for multiple regions:[[lower limit 1,upper limit 1],[lower limit 2,upper limit 2],...]
bordercut : None or iterable (with two floats), optional
cut spectra at the low and high wavelength limit. (Default) None
uses the limits of measurement
wave_nm_bin : None or float, optional
rebins the original data into even intervals. If set to None the original data will be used.
If set to a width (e.g. 2nm), the wavelength axis will be divided into steps of this size
and the mean of all measurements in the interval is taken. The re-binning stops as soon as
the measured stepsize is wider than given here, then the original bins are used.
This function is particularly useful for spectrometer with non-linear dispersion,
like a prism in the infrared.
	equal_energy_bin : None or float, optional
		if this is set, wave_nm_bin is ignored and the data is rebinned into equal energy bins (this assumes that the data is in nm).
		If the dual axis is on then the lower axis is energy and the upper is wavelength
width : float, optional
the width used in kinetics, see below (Default) 10nm
rel_wave : float or list (of floats), optional
"rel_wave" and "width" (in the object called "wavelength_bin" work together for the creation
of kinetic plots. When plotting kinetic spectra one line will be plotted for each entrance
in the list/vector rel_wave. During object generation the vector np.arange(300,1000,100)
is set as standard. Another typical using style would be to define a list of interesting
wavelength at which a kinetic development is to be plotted. At each selected wavelength
the data between wavelength+ta.wavelength_bin and wavelength-ta.wavelength_bin is averaged
for each timepoint returned
rel_time : float or list/vector (of floats), optional
For each entry in rel_time a spectrum is plotted. If time_width_percent=0 (Default) the
nearest measured timepoint is chosen. For other values see "time_width_percent"
time_width_percent : float
"rel_time" and "time_width_percent" work together for creating spectral plots at
specific timepoints. For each entry in rel_time a spectrum is plotted.
If however e.g. time_width_percent=10 the region between the timepoint closest
to the 1.1 x timepoint and 0.9 x timepoint is averaged and shown
		(and the legend adjusted accordingly). This is particularly useful for the densely
		sampled region close to t=0. Typically, for logarithmically recorded kinetics, the
		timepoints at later times will be further apart than 10 percent of the value,
but this allows to elegantly combine values around time=0 for better statistics.
This averaging is only applied for the plotting function and not for the fits.
ignore_time_region : None or list (of two floats or of lists), optional
cut set a time range with a low and high limit from the fits. (Default) None nothing happens
The region will be removed during the fitting process (and will be missing in the fit-result
plots)
Usage single region: [lower region limit,upper region limit],
use for multiple regions:[[lower limit 1,upper limit 1],[lower limit 2,upper limit 2],...]
time_bin : None or int, optional
is dividing the points on the time-axis in even bins and averages the found values in between.
This is a hard approach that also affects the fits. I do recommend to use this carefully,
it is most useful for modulated data. A better choice for transient absorption that only
affects the kinetics is "time_width_percent"
cmap : None or matplotlib color map, optional
		is a powerful variable that chooses the colour map applied for all plots. If set to
		None (Default) then self.cmap is used.
		As standard I use the color map "jet" from matplotlib. There are a variety of colormaps
		available that are very useful. Beside "jet", "viridis" is a good choice as it is well
		visible under red-green blindness. Other useful maps are "prism" for high fluctuations
		or diverging color maps like "seismic".
		See https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html for a comprehensive
		selection. In the code the colormaps are imported, so if plot_func is imported as pf then
		self.cmap=pf.cm.viridis sets viridis as the map to use. Internally the colors are chosen
		with the "colm" function. The 2d plots require a continuous color map, so if something
		else is given the 2d plots are shown automatically with "jet". For all of the 1d plots however
		I first select a number of colors before each plot. If cmap is a continuous map then these
		are sampled evenly over the colourmap. Manual iterables of colours
		cmap=[(1,0,0),(0,1,0),(0,0,1),...] are also accepted, as are vectors or dataframes that
		contain the colors as rows. There must of course be sufficient colors present for
		the number of lines that will be plotted, so I recommend providing at least 10 colours
		(e.g. your university colors). Colours are always given as a list or tuple with RGB or RGBA
		(with the last A being the Alpha=transparency). All numbers are between 0 and 1.
If a list/vector/DataFrame is given for the colours they will be used in the order provided.
color_offset : int, optional
		At the (Default) 0 the colours are chosen from the beginning, for a larger value Color_offset
colors are skipped. Usually only used if multiple plots are created, and the data/or fit is
only shown for some of them.
log_scale : bool, optional
If True (Default), The 2D plots (Matrix) is plotted with a pseudo logarithmic intensity scale.
This usually does not give good results unless the intensity scale is symmetric
	plot_type : None or str
		is a general setting that influences what time axis will be used for the plots.
"symlog" (linear around zero and logarithmic otherwise) "lin" and "log" are valid options.
lintresh : float
		The pseudo logarithmic range "symlog" is used for most time axes. Symlog plots a range around
		time zero linearly and beyond this linear threshold 'lintresh' on a logarithmic scale. (Default) 0.3
times : None or int
are the number of components to be used in the SVD (Default) is None (which is seen as 6)
save_figures_to_folder : bool, optional
(Default) is False, if True the Figures are automatically saved
savetype : str or iterable (of str), optional
matplotlib allows the saving of figures in various formats. (Default) "png",
typical and recommendable options are "svg" and "pdf".
path : None, str or path object, optional
This defines where the files are saved if the safe_figures_to_folder parameter is True,
quite useful if a lot of data sets are to be printed fast.
If a path is given, this is used. If a string like the (Default) "result_figures" is given,
		then a subfolder of this name will be used (and generated if necessary)
		relative to self.path. Use an empty string to use the self.path
If set to None, the location of the plot_func will be used and
a subfolder with title "result_figures" be generated here
filename : str, optional
		offers to replace the base-name used for all plots (to e.g. specify what sample was used).
if (Default) None is used, the self.filename is used as a base name. The filename plays only a
role during saving, as does the path and savetype
print_click_position : bool, optional
if True then the click position is printed for the spectral plots
return_plots : bool, optional
		(Default) False, the return value is ignored. If True a dictionary with the handles to the figures is returned
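	Examples
	------------
	A minimal usage sketch (the values are only illustrative and ``ta.ds`` is assumed to be the
	measured DataFrame of an opened project, with times as index and wavelengths as columns):

	>>> import plot_func as pf
	>>> pf.plot_raw(ds = ta.ds, plotting = range(4), rel_wave = [400, 500, 600], rel_time = [1, 10, 100])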
'''
if ds is None:raise ValueError('We need something to plot!!!')
if baseunit != 'ps':
if baseunit == 'ns':
ds.index.name = 'Time in ns'
else:
ds.index.name=baseunit
if path is None:path=check_folder(path='result_figures',current_path=os.path.dirname(os.path.realpath(__file__)))
if filename is None:filename='standard.sia'
if not hasattr(plotting,'__iter__'):plotting=[plotting]
debug=False
plt.ion()
if 0 in plotting:#MAtrix
fig1 = plot2d(ds = ds, cmap = cmap, ignore_time_region = ignore_time_region, plot_type = plot_type,
baseunit = baseunit, intensity_range = intensity_range, scattercut = scattercut,
bordercut = bordercut, wave_nm_bin = wave_nm_bin, levels = 200, lintresh = lintresh,
timelimits = timelimits, time_bin = time_bin, title = title, log_scale = log_scale,
data_type = data_type, equal_energy_bin = equal_energy_bin)
fig1.tight_layout()
if debug:print('plotted Matrix')
if 1 in plotting:#kinetics
fig2,ax2=plt.subplots(figsize=(10,6),dpi=100)
_ = plot1d(ds = ds, ax = ax2, subplot = True, cmap = cmap, width = width, wavelength = rel_wave,
title = title, lines_are = 'data' , plot_type = plot_type, lintresh = lintresh,
timelimits = timelimits, intensity_range = intensity_range, scattercut = scattercut,
ignore_time_region = ignore_time_region, baseunit = baseunit, data_type = data_type, units = units)
_ = plot1d(ds = ds, ax = ax2, subplot = False, cmap = cmap, width = width, wavelength = rel_wave,
title = title, lines_are = 'smoothed', plot_type = plot_type, lintresh = lintresh,
timelimits = timelimits, intensity_range = intensity_range, scattercut = scattercut,
ignore_time_region = ignore_time_region, baseunit = baseunit, data_type = data_type, units = units )
if debug:print('plotted kinetics')
if 2 in plotting:#Spectra
fig3,ax3 = plt.subplots(figsize = (10,6),dpi = 100)
_ = plot_time(ds, subplot = True, ax = ax3, plot_second_as_energy = False, cmap = cmap,
rel_time = rel_time, time_width_percent = time_width_percent, title = title,
baseunit = baseunit, lines_are = 'data' , scattercut = scattercut,
wave_nm_bin = wave_nm_bin, bordercut = bordercut, intensity_range = intensity_range,
ignore_time_region = ignore_time_region, data_type = data_type, units = units, equal_energy_bin = equal_energy_bin)
if plot_second_as_energy:
_ = plot_time(ds ,subplot = False, ax = ax3, plot_second_as_energy = True, cmap = cmap,
rel_time = rel_time, time_width_percent = time_width_percent, title = title,
baseunit = baseunit, lines_are = 'smoothed', scattercut = scattercut,
wave_nm_bin = wave_nm_bin, bordercut = bordercut, intensity_range = intensity_range,
ignore_time_region = ignore_time_region, data_type = data_type, units = units, equal_energy_bin = equal_energy_bin)
else:
_ = plot_time(ds ,subplot = False, ax = ax3, plot_second_as_energy = False, cmap = cmap,
rel_time = rel_time, time_width_percent = time_width_percent, title = title,
baseunit = baseunit, lines_are = 'smoothed', scattercut = scattercut,
wave_nm_bin = wave_nm_bin, bordercut = bordercut, intensity_range = intensity_range,
ignore_time_region = ignore_time_region, data_type = data_type, units = units, equal_energy_bin = equal_energy_bin)
if print_click_position:
plt.connect('button_press_event', mouse_move)
fig3.tight_layout()
if debug:print('plotted Spectra')
if 3 in plotting: #---plot at set time----------
try:
fig4 = SVD(ds , times = times , timelimits = timelimits , scattercut = scattercut ,
bordercut = bordercut , wave_nm_bin = wave_nm_bin, ignore_time_region = ignore_time_region,
cmap = cmap)
except:
print("SVD failed with:",sys.exc_info()[0])
if debug:print('plotted SVD')
plt.show()
if save_figures_to_folder:
fi=filename.split('.')[0]
try:
fig1.savefig(check_folder(path=path,filename='%s_RAW_MAT.%s'%(fi,savetype)),bbox_inches='tight',dpi=300)
fig2.savefig(check_folder(path=path,filename='%s_RAW_SEL.%s'%(fi,savetype)),bbox_inches='tight',dpi=300)
fig3.savefig(check_folder(path=path,filename='%s_RAW_SPEK.%s'%(fi,savetype)),bbox_inches='tight',dpi=300)
fig4.savefig(check_folder(path=path,filename='%s_RAW_SVD.%s'%(fi,savetype)),bbox_inches='tight',dpi=300)
except:
pass
if return_plots:
return_dicten={}
try:
return_dicten[0]=fig1
except:
pass
try:
return_dicten[1]=fig2
except:
pass
try:
return_dicten[2]=fig3
except:
pass
try:
return_dicten[3]=fig4
except:
pass
return return_dicten
def plot_time(ds, ax = None, rel_time = None, time_width_percent = 10, ignore_time_region = None,
wave_nm_bin = None, title = None, text_in_legend = None, baseunit = 'ps',
lines_are = 'smoothed', scattercut = None, bordercut = None, subplot = False, linewidth = 1,
color_offset = 0, intensity_range = None, plot_second_as_energy = True, cmap = standard_map,
data_type = 'differential Absorption in $\mathregular{\Delta OD}$', units = 'nm', equal_energy_bin = None, from_fit = None):
	'''Function to create plots at a certain time. In general you give under rel_time a
	list of times at which you want to plot. The time width percentage means that
	this function integrates everything plus/minus this percentage (e.g. 10%) around this time. lines_are is a
	switch that regulates what is plotted: "data" plots the data only,
	"smoothed" plots the data and a smoothed version of the data, "fitted" plots only the fit.
	The subplot switch is for using this function to plot e.g. multiple different datasets.
Parameters
---------------
ds : DataFrame
This dataframe contains the data to be plotted. It is copied and sliced into the
regions defined. The dataframe expects the time to be in Index and the wavelength/energy
to be in the columns. The spectra is plotted with a second (energy) axis
ax : None or matplotlib axis object, optional
if None (Default), a figure and axis will be generated for the plot, if axis is given the plot will
placed in there.
data_type : str
this is the datatype and effectively the unit put on the intensity axis
		(Default) 'differential Absorption in $\mathregular{\Delta OD}$'
rel_time : float or list/vector (of floats), optional
For each entry in rel_time a spectrum is plotted. If time_width_percent=0 (Default) the
nearest measured timepoint is chosen. For other values see 'time_width_percent'
time_width_percent : float
"rel_time" and "time_width_percent" work together for creating spectral plots at
specific timepoints. For each entry in rel_time a spectrum is plotted.
If however e.g. time_width_percent=10 the region between the timepoint closest
to the 1.1 x timepoint and 0.9 x timepoint is averaged and shown
		(and the legend adjusted accordingly). This is particularly useful for the densely
		sampled region close to t=0. Typically, for logarithmically recorded kinetics, the
		timepoints at later times will be further apart than 10 percent of the value,
but this allows to elegantly combine values around time=0 for better statistics.
This averaging is only applied for the plotting function and not for the fits.
ignore_time_region : None or list (of two floats or of lists), optional
cut set a time range with a low and high limit from the fits. (Default) None nothing happens
The region will be removed during the fitting process (and will be missing in the fit-result
plots)
Usage single region: [lower region limit,upper region limit],
use for multiple regions:[[lower limit 1,upper limit 1],[lower limit 2,upper limit 2],...]
wave_nm_bin : None or float, optional
rebins the original data into even intervals. If set to None the original data will be used.
If set to a width (e.g. 2nm), the wavelength axis will be divided into steps of this size
and the mean of all measurements in the interval is taken. The re-binning stops as soon as
the measured stepsize is wider than given here, then the original bins are used.
This function is particularly useful for spectrometer with non-linear dispersion,
like a prism in the infrared.
title : None or str, optional
title to be used on top of each plot
The (Default) None triggers self.filename to be used. Setting a specific title as string will
be used in all plots. To remove the title all together set an empty string with this command title=""
linewidth : float, optional
linewidth to be used for plotting
text_in_legend : str, optional
		text to be used in the legend before the actual lines and colours (set as header)
baseunit : str
		baseunit is a neat way to change the unit on the time axis of the plots. (Default) 'ps', but it
		can be frames or something similar. This changes only the label of the axis.
		During the import there is the option to divide the numbers by a factor.
		I have also used frames or fs as units. Importantly, all time units will be labeled with
		this unit.
scattercut : None or iterable (of floats or other iterable, always pairs!), optional
		intended to "cut" one or multiple scatter regions. (if (Default) None nothing
happens) If it is set the spectral region between the limits is set to zero.
Usage single region: [lower region limit,upper region limit],
use for multiple regions:[[lower limit 1,upper limit 1],[lower limit 2,upper limit 2],...]
bordercut : None or iterable (with two floats), optional
cut spectra at the low and high wavelength limit. (Default) None
uses the limits of measurement
	subplot : bool, optional
False (Default) means this is a main plot in this axis! if True then this is the second plot in the axis
and things like axis ticks should not be reset
this also avoids adding the object to the legend
color_offset : int, optional
		At the (Default) 0 the colours are chosen from the beginning, for a larger value Color_offset
colors are skipped. Usually only used if multiple plots are created, and the data/or fit is
only shown for some of them.
intensity_range : None, float or list [of two floats]
intensity_range is a general switch that governs what intensity range the plots show.
		For the 1d plots this is the y-axis, for the 2d plots this is the colour scale.
		This parameter recognizes three settings. If set to "None" (Default) the minimum and
		maximum of the data are used. A single value (like in the example below, which is the intended use)
		gives a symmetric scale, while a list with two entries gives an asymmetric scale, e.g.
intensity_range=3e-3 is converted into intensity_range=[-3e-3,3e-3]
plot_second_as_energy : bool, optional
For (Default) True a second x-axis is plotted with "eV" as unit
cmap : None or matplotlib color map, optional
		is a powerful variable that chooses the colour map applied for all plots. If set to
		None (Default) then self.cmap is used.
		As standard I use the color map "jet" from matplotlib. There are a variety of colormaps
		available that are very useful. Beside "jet", "viridis" is a good choice as it is well
		visible under red-green blindness. Other useful maps are "prism" for high fluctuations
		or diverging color maps like "seismic".
		See https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html for a comprehensive
		selection. In the code the colormaps are imported, so if plot_func is imported as pf then
		self.cmap=pf.cm.viridis sets viridis as the map to use. Internally the colors are chosen
		with the "colm" function. The 2d plots require a continuous color map, so if something
		else is given the 2d plots are shown automatically with "jet". For all of the 1d plots however
		I first select a number of colors before each plot. If cmap is a continuous map then these
		are sampled evenly over the colourmap. Manual iterables of colours
		cmap=[(1,0,0),(0,1,0),(0,0,1),...] are also accepted, as are vectors or dataframes that
		contain the colors as rows. There must of course be sufficient colors present for
		the number of lines that will be plotted, so I recommend providing at least 10 colours
		(e.g. your university colors). Colours are always given as a list or tuple with RGB or RGBA
		(with the last A being the Alpha=transparency). All numbers are between 0 and 1.
If a list/vector/DataFrame is given for the colours they will be used in the order provided.
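	Examples
	------------
	A minimal usage sketch (``ta.ds`` is assumed to be a pandas DataFrame with the times as
	index and the wavelengths as columns, e.g. from an opened project):

	>>> import plot_func as pf
	>>> fig = pf.plot_time(ds = ta.ds, rel_time = [0.5, 1, 10], time_width_percent = 10)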
'''
if not hasattr(rel_time,'__iter__'):rel_time=[rel_time]
rel_time=[a for a in rel_time if a<ds.index.values.astype('float').max()]
if isinstance(cmap,list):
colors=cmap[color_offset:]
else:
colors=colm(np.arange(color_offset,len(rel_time)+color_offset),cmap=cmap)
if ax is None:
fig,ax1=plt.subplots(figsize=(10,6),dpi=100)
else:
ax1=ax
ds = sub_ds(ds = ds, times = rel_time, time_width_percent = time_width_percent,
scattercut = scattercut, drop_scatter=True, bordercut = bordercut, baseunit=baseunit,
ignore_time_region = ignore_time_region, wave_nm_bin = wave_nm_bin, equal_energy_bin = equal_energy_bin, from_fit = from_fit)
if 'smoothed' in lines_are:
if scattercut is None:
smoothed=Frame_golay(ds, window = 5, order = 3,transpose=False)
smoothed.plot(ax = ax1, style = '-', color = colors, legend = False, lw = linewidth)
elif isinstance(scattercut[0], numbers.Number):#handling single scattercut
if equal_energy_bin is not None:
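					# convert the scattercut limits from nm to eV (E = h*c/lambda) and reverse their order,
					# since the energy axis runs opposite to the wavelength axis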
scattercut=[scipy.constants.h*scipy.constants.c/(a*1e-9*scipy.constants.electron_volt) for a in scattercut]
scattercut=scattercut[::-1]
smoothed=Frame_golay(ds.loc[:scattercut[0],:], window = 5, order = 3,transpose=False)
smoothed.plot(ax = ax1, style = '-', color = colors, legend = False, lw = linewidth)
smoothed=Frame_golay(ds.loc[scattercut[1]:,:], window = 5, order = 3,transpose=False)
smoothed.plot(ax = ax1, style = '-', color = colors, legend = False, lw = linewidth, label='_nolegend_')
else:#handling multiple scattercuts
try:
scattercut=flatten(scattercut)
if equal_energy_bin is not None:
scattercut=[scipy.constants.h*scipy.constants.c/(a*1e-9*scipy.constants.electron_volt) for a in scattercut]
scattercut=scattercut[::-1]
				for i in range(len(scattercut)//2+1):
if i==0:
smoothed=Frame_golay(ds.loc[:scattercut[0],:], window = 5, order = 3,transpose=False)
smoothed.plot(ax = ax1, style = '-', color = colors, legend = False, lw = linewidth)
elif i<(len(scattercut)/2):
smoothed=Frame_golay(ds.loc[scattercut[2*i-1]:scattercut[2*i],:], window = 5, order = 3,transpose=False)
smoothed.plot(ax = ax1, style = '-', color = colors, legend = False, lw = linewidth, label='_nolegend_')
else:
smoothed=Frame_golay(ds.loc[scattercut[-1]:,:], window = 5, order = 3,transpose=False)
smoothed.plot(ax = ax1, style = '-', color = colors, legend = False, lw = linewidth, label='_nolegend_')
except:
print('printing the smoothed scatter interpolation created an error, using default')
				smoothed=Frame_golay(ds, window = 5, order = 3,transpose=False)
smoothed.plot(ax = ax1, style = '-', color = colors, legend = False, lw = linewidth)
if not subplot:
leg = ax1.legend(ds,title = 'lines = smoothed', loc='best', labelspacing = 0, ncol = 2, columnspacing = 1, handlelength = 1, frameon = False)
elif 'data' in lines_are:
if subplot:
ax1 = ds.plot(ax = ax1, legend = False, style = '*', color = colors, zorder = 0)
else:
ax1 = ds.plot(ax = ax1, legend = False, style = '*', color = colors)
leg = ax1.legend(ds,labelspacing = 0, ncol = 2, columnspacing = 1, handlelength = 1, loc = 'best', frameon = False)
elif 'fitted' in lines_are:
if scattercut is None:
ds.plot(ax = ax1, style = '-', color = colors, legend = False, lw = linewidth, alpha = 0.7)
elif isinstance(scattercut[0], numbers.Number):
if equal_energy_bin is not None:
scattercut=[scipy.constants.h*scipy.constants.c/(a*1e-9*scipy.constants.electron_volt) for a in scattercut]
ds.loc[:scattercut[0],:].plot(ax = ax1, legend = False, style = '-', color = colors, alpha = 0.7, lw = linewidth)
ds.loc[scattercut[1]:,:].plot(ax = ax1, legend = False, style = '-', color = colors, alpha = 0.7, lw = linewidth, label='_nolegend_')
else:
try:
scattercut=flatten(scattercut)
if equal_energy_bin is not None:
scattercut=[scipy.constants.h*scipy.constants.c/(a*1e-9*scipy.constants.electron_volt) for a in scattercut]
				for i in range(len(scattercut)//2+1):
if i==0:
ds.loc[:scattercut[0],:].plot(ax = ax1, legend = False, style = '-', color = colors, alpha = 0.7, lw = linewidth)
elif i<(len(scattercut)/2):
ds.loc[scattercut[2*i-1]:scattercut[2*i],:].plot(ax = ax1, legend = False, style = '-', color = colors, alpha = 0.7, lw = linewidth, label='_nolegend_')
else:
ds.loc[scattercut[-1]:,:].plot(ax = ax1, legend = False, style = '-', color = colors, alpha = 0.7, lw = linewidth, label='_nolegend_')
except:
ds.plot(ax = ax1, legend = False, style = '-', color = colors, alpha = 0.7, lw = linewidth)
if not subplot:leg = ax1.legend(ds,title = 'lines = fit', loc = 'best', labelspacing = 0, ncol = 2, columnspacing = 1, handlelength = 1, frameon = False)
if not subplot:
if text_in_legend is not None:
stringen=leg.get_title().get_text()
texten=text_in_legend
leg.set_title(texten + '\n' +stringen)
else:#for multiple plotting
return ax1
if bordercut is None:
ax1.autoscale(axis='x',tight=True)
else:
if equal_energy_bin is not None:
bordercut=[scipy.constants.h*scipy.constants.c/(a*1e-9*scipy.constants.electron_volt) for a in bordercut]
#bordercut=bordercut[::-1]
ax1.set_xlim(bordercut)
if (not subplot) and plot_second_as_energy:
ax2=ax1.twiny()
ax2.set_xlim(ax1.get_xlim())
ax2.set_xticks(ax1.get_xticks())
if equal_energy_bin is not None:
labels=['%.1f'%(scipy.constants.h*scipy.constants.c/(a*1e-9*scipy.constants.electron_volt)) for a in ax2.get_xticks()]
else:
labels=['%.2f'%(scipy.constants.h*scipy.constants.c/(a*1e-9*scipy.constants.electron_volt)) for a in ax2.get_xticks()]
_=ax2.set_xticklabels(labels)
if equal_energy_bin is not None:
_=ax2.set_xlabel('Wavelength in nm')
else:
_=ax2.set_xlabel('Energy in eV')
ax1.set_zorder(ax2.get_zorder()+1)
if not subplot:
		if title is not None and not len(title)==0:
try:
ax2.set_title(title,pad=10)
except:
ax1.set_title(title,pad=10)
ax1.set_ylabel(data_type)
ax1.yaxis.set_major_formatter(FuncFormatter(lambda x, pos: '%.2g'%(x)))
ax1.set_xlabel(ds.index.name)
ax1.minorticks_on()
#ax1.xaxis.set_minor_locator(AutoMinorLocator(6))
ax1.plot(ax1.get_xlim(),[0,0],color='black',lw=0.5,zorder=0, label='_nolegend_')
if intensity_range is None:
ax1.autoscale(axis='y',tight=True)
else:
if not hasattr(intensity_range,'__iter__'):#lets have an lazy option
intensity_range=np.array([-intensity_range,intensity_range])
ax1.set_ylim(intensity_range)
if ax is None:
return fig
else:
return ax1
def plot1d(ds = None, wavelength = None, width = None, ax = None, subplot = False, title = None, intensity_range = None,
baseunit = 'ps', timelimits = None, scattercut = None, bordercut = None, cmap = standard_map, plot_type = 'symlog',
lintresh = 0.1, text_in_legend = None, lines_are = 'smoothed', color_offset = 0, ignore_time_region = None,
linewidth = 1, data_type = 'differential Absorption in $\mathregular{\Delta OD}$', units = 'nm', from_fit = False):
	'''Plots single line kinetics for the specific wavelengths given with the parameter wavelength.
Parameters
---------------
ds : DataFrame
This dataframe contains the data to be plotted. It is copied and sliced into the
regions defined. The dataframe expects the time to be in Index and the wavelength/energy
to be in the columns. The spectra is plotted with a second (energy) axis
wavelength : float or list (of floats)
wavelength is in the object called "rel_wave" and works with "width"
(in the object called "wavelength_bin") together for the creation
		of kinetic plots. When plotting kinetic spectra one line will be plotted for each entry
in the list/vector rel_wave. During object generation the vector np.arange(300,1000,100)
is set as standard. Another typical using style would be to define a list of interesting
wavelength at which a kinetic development is to be plotted. At each selected wavelength
the data between wavelength+ta.wavelength_bin and wavelength-ta.wavelength_bin is averaged
for each timepoint returned
data_type : str
this is the datatype and effectively the unit put on the intensity axis
		(Default) 'differential Absorption in $\mathregular{\Delta OD}$'
width : float, optional
the width used in kinetics, see below (Default) 10nm
ax : None, matplotlib axis object optional
		If None (Default) a new figure and a new axis are created, otherwise ax needs to be a Matplotlib Axis
subplot : bool, optional
If False (Default) axis labels and such are set. If True, we plot into the same axis and
do not set labels
text_in_legend : None, str, optional
extra text to be put into the legend (above the lines)
lines_are : str, optional
Depending on this parameter the plots contain:
'smoothed' = data lines of golay smoothed data (Default)
'data' = dots are data,
'fitted' = not data, just lines shown
title : None or str
title to be used on top of each plot
The (Default) None triggers self.filename to be used. Setting a specific title as string will
be used in all plots. To remove the title all together set an empty string
linewidth : float, optional
		linewidth to be used for plotting
intensity_range : None, float or list [of two floats]
intensity_range is a general switch that governs what intensity range the plots show.
		For the 1d plots this is the y-axis, for the 2d plots this is the colour scale.
		This parameter recognizes three settings. If set to "None" (Default) the minimum and
		maximum of the data are used. A single value (like in the example below, which is the intended use)
		gives a symmetric scale, while a list with two entries gives an asymmetric scale, e.g.
intensity_range=3e-3 is converted into intensity_range=[-3e-3,3e-3]
baseunit : str
		baseunit is a neat way to change the unit on the time axis of the plots. (Default) 'ps', but it
		can be frames or something similar. This changes only the label of the axis.
		During the import there is the option to divide the numbers by a factor.
		I have also used frames or fs as units. Importantly, all time units will be labeled with
		this unit.
timelimits : None or list (of 2 floats), optional
cut times at the low and high time limit. (Default) None uses the limits of measurement
Important: If either the background or the chirp is to be fit this must include the
time before zero! Useful: It is useful to work on different regions, starting with
		the longest (then use the ta.Background function prior to fit) and expand from there
scattercut : None or iterable (of floats or other iterable, always pairs!), optional
		intended to "cut" one or multiple scatter regions. (if (Default) None nothing
happens) If it is set the spectral region between the limits is set to zero.
Usage single region: [lower region limit,upper region limit],
use for multiple regions:[[lower limit 1,upper limit 1],[lower limit 2,upper limit 2],...]
bordercut : None or iterable (with two floats), optional
cut spectra at the low and high wavelength limit. (Default) None
uses the limits of measurement
ignore_time_region : None or list (of two floats or of lists), optional
cut set a time range with a low and high limit from the fits. (Default) None nothing happens
The region will be removed during the fitting process (and will be missing in the fit-result
plots)
Usage single region: [lower region limit,upper region limit],
use for multiple regions:[[lower limit 1,upper limit 1],[lower limit 2,upper limit 2],...]
cmap : None or matplotlib color map, optional
		is a powerful variable that chooses the colour map applied for all plots. If set to
		None (Default) then self.cmap is used.
		As standard I use the color map "jet" from matplotlib. There are a variety of colormaps
		available that are very useful. Beside "jet", "viridis" is a good choice as it is well
		visible under red-green blindness. Other useful maps are "prism" for high fluctuations
		or diverging color maps like "seismic".
		See https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html for a comprehensive
		selection. In the code the colormaps are imported, so if plot_func is imported as pf then
		self.cmap=pf.cm.viridis sets viridis as the map to use. Internally the colors are chosen
		with the "colm" function. The 2d plots require a continuous color map, so if something
		else is given the 2d plots are shown automatically with "jet". For all of the 1d plots however
		I first select a number of colors before each plot. If cmap is a continuous map then these
		are sampled evenly over the colourmap. Manual iterables of colours
		cmap=[(1,0,0),(0,1,0),(0,0,1),...] are also accepted, as are vectors or dataframes that
		contain the colors as rows. There must of course be sufficient colors present for
		the number of lines that will be plotted, so I recommend providing at least 10 colours
		(e.g. your university colors). Colours are always given as a list or tuple with RGB or RGBA
		(with the last A being the Alpha=transparency). All numbers are between 0 and 1.
If a list/vector/DataFrame is given for the colours they will be used in the order provided.
color_offset : int, optional
		At the (Default) 0 the colours are chosen from the beginning, for a larger value Color_offset
colors are skipped. Usually only used if multiple plots are created, and the data/or fit is
only shown for some of them.
plot_type : None or str
		is a general setting that influences what time axis will be used for the plots.
"symlog" (linear around zero and logarithmic otherwise) "lin" and "log" are valid options.
lintresh : float
		The pseudo logarithmic range "symlog" is used for most time axes. Symlog plots a range around
		time zero linearly and beyond this linear threshold 'lintresh' on a logarithmic scale. (Default) 0.1
	from_fit : bool, optional
		internal switch needed to avoid re-slicing of the data along the spectral axis for equal energy bins
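	Examples
	------------
	A minimal usage sketch (``ta.ds`` is assumed to be a pandas DataFrame with the times as
	index and the wavelengths as columns, e.g. from an opened project):

	>>> import plot_func as pf
	>>> fig = pf.plot1d(ds = ta.ds, wavelength = [400, 500, 600], width = 10)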
'''
if not isinstance(ds,pandas.DataFrame):
print("input format wrong")
if ax is None:
fig,ax1=plt.subplots(figsize=(10,6),dpi=100)
else:
ax1=ax
if width is None:width=1
if not hasattr(wavelength, '__iter__'):wavelength = [wavelength]
if isinstance(cmap,list):
colors=cmap[color_offset:]
else:
colors = colm(np.arange(color_offset, len(wavelength)+color_offset), cmap = cmap)
ds = sub_ds(ds = ds, wavelength = wavelength, wavelength_bin = width, scattercut = scattercut,
ignore_time_region = ignore_time_region, drop_ignore = True, from_fit = from_fit)
if 'smoothed' in lines_are:
if ignore_time_region is None:
smoothed=Frame_golay(ds, window = 5, order = 3)
smoothed.plot(ax = ax1, style = '-', color = colors, legend = False, lw = linewidth)
elif isinstance(ignore_time_region[0], numbers.Number):
smoothed=Frame_golay(ds.loc[:ignore_time_region[0],:], window = 5, order = 3)
smoothed.plot(ax = ax1, style = '-', color = colors, legend = False, lw = linewidth)
smoothed=Frame_golay(ds.loc[ignore_time_region[1]:,:], window = 5, order = 3)
smoothed.plot(ax = ax1, style = '-', color = colors, legend = False, lw = linewidth, label='_nolegend_')
else:
try:
ignore_time_region=flatten(ignore_time_region)
				for i in range(len(ignore_time_region)//2+1):
if i==0:
smoothed=Frame_golay(ds.loc[:ignore_time_region[0],:], window = 5, order = 3,transpose=True)
smoothed.plot(ax = ax1, style = '-', color = colors, legend = False, lw = linewidth)
elif i<(len(ignore_time_region)/2):
smoothed=Frame_golay(ds.loc[ignore_time_region[2*i-1]:ignore_time_region[2*i],:], window = 5, order = 3,transpose=True)
smoothed.plot(ax = ax1, style = '-', color = colors, legend = False, lw = linewidth, label='_nolegend_')
else:
smoothed=Frame_golay(ds.loc[ignore_time_region[-1]:,:], window = 5, order = 3,transpose=True)
smoothed.plot(ax = ax1, style = '-', color = colors, legend = False, lw = linewidth, label='_nolegend_')
except:
smoothed=Frame_golay(ds, window = 5, order = 3)
smoothed.plot(ax = ax1, style = '-', color = colors, legend = False, lw = linewidth)
elif 'data' in lines_are:
if subplot:ds.plot(ax = ax1, style = '*', color = colors, legend = False, zorder = 0, label='_nolegend_')
else: ds.plot(ax = ax1, style = '*', color = colors, legend = False)
elif 'fitted' in lines_are:
if ignore_time_region is None:
ds.plot(ax = ax1, style='-', color = colors, legend = False, lw = linewidth)
elif isinstance(ignore_time_region[0], numbers.Number):
ds.loc[:ignore_time_region[0],:].plot(ax = ax1, style='-', color = colors, legend = False, lw = linewidth)
ds.loc[ignore_time_region[1]:,:].plot(ax = ax1, style='-', color = colors, legend = False, lw = linewidth, label='_nolegend_')
else:
try:
ignore_time_region=flatten(ignore_time_region)
				for i in range(len(ignore_time_region)//2+1):
if i==0:
ds.loc[:ignore_time_region[0],:].plot(ax = ax1, style='-', color = colors, legend = False, lw = linewidth)
elif i<(len(ignore_time_region)/2):
ds.loc[ignore_time_region[2*i-1]:ignore_time_region[2*i],:].plot(ax = ax1, style='-', color = colors, legend = False, lw = linewidth, label='_nolegend_')
else:
ds.loc[ignore_time_region[-1]:,:].plot(ax = ax1, style='-', color = colors, legend = False, lw = linewidth, label='_nolegend_')
except:
ds.plot(ax = ax1, style='-', color = colors, legend = False, lw = linewidth)
#Legend
if not subplot:
handles, labels = ax1.get_legend_handles_labels()
handles=handles[:len(wavelength)]
labels=labels[:len(wavelength)]
for i,entry in enumerate(labels):
labels[i]=entry + ' %s'%units
if 'smoothed' in lines_are:leg=ax1.legend(labels,title='%g %s width, lines=smoothed'%(width,units),labelspacing=0,ncol=2,columnspacing=1,handlelength=1,frameon=False)
elif 'data' in lines_are: leg=ax1.legend(labels,title='%g %s width'%(width,units) ,labelspacing=0,ncol=2,columnspacing=1,handlelength=1,frameon=False)
elif 'fitted' in lines_are:leg=ax1.legend(labels,title='%g %s width, lines=fit'%(width,units) ,labelspacing=0,ncol=2,columnspacing=1,handlelength=1,frameon=False)
if text_in_legend is not None:
stringen=leg.get_title().get_text()
texten=text_in_legend
leg.set_title(texten + '\n' +stringen)
x=ds.index.values.astype('float')
#limits and ticks
if timelimits is None:timelimits=[min(x),max(x)]
if "symlog" in plot_type:
lintresh=lintresh
ax1.set_xscale('symlog', linthresh=lintresh,subs=range(2,9),linscale=lintresh/4.)
try:
if lintresh>0.5:
ticks=np.concatenate((np.arange(-100,0,10,),[-5,-3,-2,-1,-0.5,0,0.5],np.logspace(0,10,11)))
elif lintresh>=0.3:
ticks=np.concatenate((np.arange(-100,0,10,),[-5,-3,-2,-1,-0.3,0,0.3],np.logspace(0,10,11)))
elif lintresh>=0.1:
ticks=np.concatenate((np.arange(-100,0,10,),[-5,-3,-2,-1,-0.1,0,0.1],np.logspace(0,10,11)))
else:
ticks=np.concatenate((np.arange(-100,0,10,),[-5,-3,-2,-1,0],np.logspace(0,10,11)))
ticks=ticks[ticks>timelimits[0]]
ticks=ticks[ticks<timelimits[1]]
ax1.set_xticks(ticks)
except:
pass
ax1.set_xlim(timelimits[0],timelimits[1])
elif "log" in plot_type:
lower_time=max(1e-6,timelimits[0])
ax1.set_xlim(lower_time,timelimits[1])
ax1.set_xscale('log')
elif "lin" in plot_type:
ax1.set_xlim(timelimits[0],timelimits[1])
if intensity_range is None:
ax1.autoscale(axis='y',tight=True)
else:
if not hasattr(intensity_range,'__iter__'):#lets have an lazy option
intensity_range=np.array([-intensity_range,intensity_range])
ax1.set_ylim(intensity_range)
if not subplot:
ax1.plot(ax1.get_xlim(),[0,0],'black',lw=1,zorder=10, label='_nolegend_')
if title is not None:
if title:
try:
ax2.set_title(title,pad=10)
except:
ax1.set_title(title,pad=10)
if "symlog" in plot_type:
ax1.plot([lintresh,lintresh],ax1.get_ylim(),color='black',linestyle='dashed',alpha=0.5)
ax1.plot([-lintresh,-lintresh],ax1.get_ylim(),color='black',linestyle='dashed',alpha=0.5)
ax1.set_xlabel(ds.index.name)
ax1.set_ylabel(data_type)
#ax1.set_xlabel('time in %s'%baseunit)
#ax1.set_ylabel('differential Absorption in $\mathregular{\Delta OD}$')
ax1.yaxis.set_major_formatter(FuncFormatter(lambda x, pos: '%.2g'%(x)))
if ax is None:
return fig
else:
return ax1
def SVD(ds, times = None, scattercut = None, bordercut = None, timelimits = [5e-1, 150], wave_nm_bin = 10,
time_bin = None, wavelength_bin = None, plotting = True, baseunit = 'ps', title = None, ignore_time_region = None,
cmap = None, equal_energy_bin = None, data_type = 'differential Absorption in $\mathregular{\Delta OD}$'):
'''This function calculates the SVD and plots an overview.
Parameters
------------
ds : DataFrame
This dataframe contains the data to be plotted. It is copied and sliced into the
regions defined. The dataframe expects the time to be in Index and the wavelength/energy
to be in the columns. The spectra are plotted with a second (energy) axis
times : None or int
the number of components to be used in the SVD. (Default) None, which is treated as 6
plotting : bool
if True (Default) the function plots the SVD and returns the figure, if False it returns the vectors U, s, V and the sliced dataset
title : None or str
title to be used on top of each plot
The (Default) None triggers self.filename to be used. Setting a specific title as string will
be used in all plots. To remove the title all together set an empty string with this command title=""
baseunit : str
baseunit is a neat way to change the unit on the time axis of the plots. (Default) 'ps', but it
can be frames or something similar. This changes only the label of the axis.
During the import there is the option to divide the numbers by a factor.
I have also used frames or fs as units. Important is that all time units will be labeled with
this unit.
timelimits : None or list (of 2 floats), optional
cut times at the low and high time limit. (Default) [5e-1, 150]; None would use the limits of the measurement
Important: If either the background or the chirp is to be fit this must include the
time before zero! Useful: It is useful to work on different regions, starting with
the longest (then use the ta.Background function prior to the fit) and expand from there
scattercut : None or iterable (of floats or other iterable, always pairs!), optional
intended to "cut" one or multiple scatter regions. (if (Default) None nothing
happens) If it is set the spectral region between the limits is set to zero.
Usage single region: [lower region limit,upper region limit],
use for multiple regions:[[lower limit 1,upper limit 1],[lower limit 2,upper limit 2],...]
bordercut : None or iterable (with two floats), optional
cut spectra at the low and high wavelength limit. (Default) None
uses the limits of measurement
wave_nm_bin : None or float, optional
rebins the original data into even intervals. If set to None the original data will be used.
If set to a width (e.g. 2nm), the wavelength axis will be divided into steps of this size
and the mean of all measurements in the interval is taken. The re-binning stops as soon as
the measured stepsize is wider than given here, then the original bins are used.
This function is particularly useful for spectrometer with non-linear dispersion,
like a prism in the infrared. (Default = 10)
wavelength_bin : float, optional
the width used in kinetics, see below (Default) 10nm
ignore_time_region : None or list (of two floats or of lists), optional
cut set a time range with a low and high limit from the fits. (Default) None nothing happens
The region will be removed during the fitting process (and will be missing in the fit-result
plots)
Usage single region: [lower region limit,upper region limit],
use for multiple regions:[[lower limit 1,upper limit 1],[lower limit 2,upper limit 2],...]
time_bin : None or int, optional
is dividing the points on the time-axis in even bins and averages the found values in between.
This is a hard approach that also affects the fits. I do recommend to use this carefully,
it is most useful for modulated data. A better choice for transient absorption that only
affects the kinetics is 'time_width_percent'
cmap : None or matplotlib color map, optional
is a powerful variable that chooses the colour map applied for all plots. If set to
None (Default) then the self.cmap is used.
As standard I use the color map "jet" from matplotlib. There are a variety of colormaps
available that are very useful. Besides "jet", "viridis" is a good choice as it remains
readable under red-green colour blindness. Other useful maps are "prism" for high fluctuations
or diverging color maps like "seismic".
See https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html for a comprehensive
selection. In the code the colormaps are imported so if plot_func is imported as pf then
self.cmap=pf.cm.viridis sets viridis as the map to use. Internally the colors are chosen
with the "colm" function. The 2d plots require a continuous color map so if something
else is given, 2d plots are shown automatically with "jet". For all of the 1d plots however
I first select a number of colors before each plot. If cmap is a continuous map then these
are sampled evenly over the colourmap. Manual iterables of colours
cmap=[(1,0,0),(0,1,0),(0,0,1),...] are also accepted, as are vectors or dataframes that
contain the colors as rows. There must of course be sufficient colors present for
the number of lines that will be plotted, so I recommend providing at least 10 colours
(e.g. your university colors). Colours are always given as a list or tuple with RGB or RGBA
(with the last A being the Alpha = transparency). All numbers are between 0 and 1.
If a list/vector/DataFrame is given for the colours they will be used in the order provided.
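Examples
---------
A minimal usage sketch (my_ds is an assumption: a DataFrame with the times as index and
the wavelength as columns, e.g. ta.ds of a loaded project):
fig = SVD(my_ds, times = 4)                                 # plot the first 4 singular components
U, s, V, ds_used = SVD(my_ds, times = 4, plotting = False)  # return the raw vectors instead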
'''
if times is None:
max_order=6
else:
max_order=times
if cmap is None:cmap=standard_map
colors=colm(np.arange(0,max_order,1),cmap=cmap)
ds = sub_ds(ds, scattercut = scattercut, bordercut = bordercut, timelimits = timelimits, wave_nm_bin = wave_nm_bin,
wavelength_bin = wavelength_bin, time_bin = time_bin, ignore_time_region = ignore_time_region)
U, s, V = np.linalg.svd(ds.values)
if plotting:
fig=plt.figure(figsize=(8,8),dpi=100)
G = GridSpec(2, 6)
ax1=fig.add_subplot(G[0,:2])
ax2=fig.add_subplot(G[1,:])
ax3=fig.add_subplot(G[0,2:])
if title is not None:
ax2.set_title(title,fontsize=plt.rcParams['figure.titlesize']-4)
else:
ax1.set_title("Component\nstrength",fontsize=plt.rcParams['figure.titlesize']-4)
ax2.set_title("Spectral component",fontsize=plt.rcParams['figure.titlesize']-4)
ax3.set_title("Temporal development\nof spectral component",fontsize=plt.rcParams['figure.titlesize']-4)
s/=s.max()
ax1.scatter(np.arange(max_order)+1,s[:max_order],c=colors,s=100)
ax1.set_xlabel('SVD order',fontsize=plt.rcParams['axes.labelsize']-2)
ax1.set_ylabel('Singular values norm.',fontsize=plt.rcParams['axes.labelsize']-2)
ax1.set_xlim(0.5,max_order+0.5)
if max_order == 6:
ax1.set_xticks([round(a) for a in np.linspace(1,max_order,6)])
else:
ax1.set_xticks([round(a) for a in np.linspace(1,max_order,5)])
V2=pandas.DataFrame(V.T,index=ds.columns.values.astype('float'))
U2=pandas.DataFrame(U,index=ds.index.values.astype('float'))
U2=U2.iloc[:,:len(s)].multiply(-s)
V2=V2.iloc[:,:len(s)].multiply(-s)
names=['SVD vector %i'%(a+1) for a in range(max_order)]
U2=U2.iloc[:,:max_order]
U2.columns=names
V2=V2.iloc[:,:max_order]
V2.columns=names
V2/=V2.abs().max(axis=1).max()
V2.plot(ax=ax2,color=colors)
ax2.set_ylabel('Intensity norm.',fontsize=plt.rcParams['axes.labelsize']-2)
U2/=U2.abs().max(axis=1).max()
U2.plot(ax=ax3,color=colors)
ax3.set_ylabel('Intensity norm.',fontsize=plt.rcParams['axes.labelsize']-2)
ax2.set_xlabel(ds.columns.name,fontsize=plt.rcParams['axes.labelsize']-2)
lims=V2.index.values.astype(float)
ax2.set_xlim(lims.min(),lims.max())
#ax2.set_xticks(np.linspace(round(lims.min(),-2),round(lims.max()-2),5))
ax2.xaxis.set_major_formatter(FuncFormatter(lambda x, pos: '%.4g'%(x)))
ax3.set_xlabel(ds.index.name,fontsize=plt.rcParams['axes.labelsize']-2)
tims=U2.index.values.astype(float)
ax3.set_xlim(max([0.01,tims.min()]),tims.max())
ax3.set_xscale('log')
#ax3.set_xticks(np.logspace(-1,2,round(np.log())))
ax3.xaxis.set_major_formatter(FuncFormatter(lambda x, pos: '%1g'%(x)))
ax2.legend(frameon=False,labelspacing=0,borderpad=0,numpoints=1,handlelength=1)
ax3.legend(frameon=False,fontsize=11,labelspacing=0,borderpad=0,numpoints=1,handlelength=1)
fig.tight_layout()
return fig
else:
return U, s, V, ds #V2 is only created in the plotting branch, so return the raw SVD vectors here
def Species_Spectra(ta=None,conc=None,das=None):
'''useful helper function that returns a dictionary that has DataFrames as entries and the names of the
components as keys
Parameters
-----------
ta : plot_func.TA object, optional
This object should contain a successful fit. The function will cycle through the fitted species
and return the matrix that is formed from the dynamics and the species associated spectrum
If this is given, then "conc" and "das" are ignored. We cycle through the columns of the concentration
and take the same column from the das Frame.
conc : DataFrame, optional
Is read only if the ta object is None. This should contain the concentration matrix with the species
as columns
das : DataFrame, optional
This should contain the spectra of the species with one column per spectrum. The position of the columns
must match the columns in the conc (at least this is what is assumed)
Examples
---------
dicten=Species_Spectra(ta)
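The returned dictionary can then be inspected per species; the key 'A' is an assumption,
the actual keys are the column names of the fitted concentrations:
matrix_A = dicten['A']   # DataFrame with the times as index and the wavelength as columns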
'''
if ta is not None:
try:
time=ta.re['c'].index.values
WL=ta.re['DAC'].index.values
conc=ta.re['c']
das=ta.re['DAC']
except:
print('the TA object must contain a successful fit')
print(ta.re)
return False
else:
if (conc is None) or (das is None):
print('If the ta object is None, then we need both the conc and the das')
return False
time=conc.index.values #define the axes that are otherwise taken from the ta object
WL=das.index.values
results={}
for i in range(len(conc.columns)):
A,B=np.meshgrid(conc.iloc[:,i].values,das.iloc[:,i].values)
C=pandas.DataFrame((A*B).T,index=time,columns=WL)
results[conc.columns[i]]=C
return results
def Fix_Chirp(ds, save_file = None, scattercut = None, intensity_range = 5e-3, wave_nm_bin = 10, bordercut=None,
shown_window = [-1.5, 1.5], filename = None, path = None, fitcoeff = None, max_points = 40,
cmap = cm.prism):
'''Manual selection of a polynomial for the chirp correction. This function opens
a plot and allows the user to select a number of points, which are then
approximated with a 4th order polynomial, and finally to select a point
that is declared as time zero. The observed window as well as the intensities
and the colour map can be chosen to enable a good correction. Here a fast
iterating colour scheme such as "prism" is often a good choice. In all of the
selections a left click selects, a right click removes the last point and
a middle click (sometimes abbreviated by clicking left and right together)
finishes the selection. If no middle click exists, the process
automatically ends after max_points (40 by default).
Many of the parameters are taken from the raw plotting part.
Parameters
-----------
ds : DataFrame
This dataframe contains the data to be plotted. It is copied and sliced into the
regions defined. The dataframe expects the time to be in Index and the wavelength/energy
to be in the columns. The spectra is plotted with a second (energy) axis
save_file : None or str, optional
If a raw file was read (e.g. "data.SIA") and the chirp correction was
completed, a file with the attached word "chirp" is created and
stored in the same location. ("data_chirp.dat") This file contains
the 5 values of the chirp correction. By selecting such a file
(e.g. from another raw data) a specific chirp is applied. If a
specific name is given with **chirp_file** (and optional **path**)
then this file is used.\n
GUI\n
The word *'gui'* can be used instead of a filename to open a gui that
allows the selection of a chirp file
scattercut : None or iterable (of floats or other iterable, always pairs!), optional
intended to "cut" one or multiple scatter regions. (if (Default) None nothing
happens) If it is set the spectral region between the limits is set to zero.
Usage single region: [lower region limit,upper region limit],
use for multiple regions:[[lower limit 1,upper limit 1],[lower limit 2,upper limit 2],...]
intensity_range : None, float or list [of two floats]
intensity_range is a general switch that governs what intensity range the plots show.
For the 1d plots this is the y-axis, for the 2d plots this is the colour scale.
This parameter recognizes three settings. If set to "None" this uses the minimum and
maximum of the data. A single value (as in the example below; the Default here is 5e-3) gives a symmetric
scale, while a list with two entries gives an asymmetric scale, e.g.
intensity_range=3e-3 is converted into intensity_range=[-3e-3,3e-3]
wave_nm_bin : None or float, optional
rebins the original data into even intervals. If set to None the original data will be used.
If set to a width (e.g. 2nm), the wavelength axis will be divided into steps of this size
and the mean of all measurements in the interval is taken. The re-binning stops as soon as
the measured stepsize is wider than given here, then the original bins are used.
This function is particularly useful for spectrometer with non-linear dispersion,
like a prism in the infrared.
shown_window : list (with two floats), optional
Defines the window that is shown during chirp correction. If the t=0 is not visible, adjust this parameter
to suit the experiment. If problems arise, I recommend using Plot_Raw to check where t=0 is located
filename : str, optional
name of the original file, that will be used to save the results later (with attached "_chirp")
path : str or path object (optional)
if path is a string without the operation system dependent separator, it is treated as a relative path,
e.g. data will look from the working directory in the sub directory data. Otherwise this has to be a
full path in either string or path object form.
fitcoeff : list or vector (5 floats), optional
One can give a vector/list with 5 numbers representing the parameter
of a 4th order polynomial (in the order
:math:`(a4*x^4 + a3*x^3+a2*x^2+a1*x1+a0)`. The chirp parameter are
stored in ta.fitcoeff and can thus be used in other TA objects. This
vector is also stored with the file and automatically applied during
re-loading of a hdf5-object
max_points : int, optional
Default = 40 max numbers of points to use in Gui selection. Useful option in case no middle mouse button
is available. (e.g. touchpad)
cmap : matplotlib colourmap, optional
Colourmap to be used for the chirp correction. While there is a large selection here I recommend to choose
a different map than is used for the normal 2d plotting.\n
cm.prism (Default) has proven to be very useful
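Examples
---------
A sketch of the typical use, first the interactive selection and then the re-use of a stored
correction (the filenames are assumptions):
ds_corrected = Fix_Chirp(ta.ds, filename = 'sample_1.SIA')          # interactive point selection
ds_corrected = Fix_Chirp(ta.ds, save_file = 'sample_1_chirp.dat')   # apply previously stored coefficients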
'''
ds=ds.fillna(0)
if fitcoeff is not None:#we loaded a previous project. This is a duplication, but I'm currently too lazy to make this tighter
if isinstance(fitcoeff,str):fitcoeff=np.array(fitcoeff.split(','),dtype='float')
if len(fitcoeff)==6:#old style parameter
fitcoeff[-2]-=fitcoeff[-1]
fitcoeff=fitcoeff[:5]
wl=ds.columns.values.astype('float')#extract the wavelength
time=ds.index.values.astype('float')#extract the time
for i in range(0, len(wl), 1):
correcttimeval = np.polyval(fitcoeff, wl[i])
f = scipy.interpolate.interp1d((time-correcttimeval), ds.values[:,i], bounds_error=False, fill_value=0)
fixed_wave = f(time)
ds.values[:, i] = fixed_wave
return ds
else:
if save_file is None:
#lets start by choosing a good intensity
if hasattr(intensity_range,'__iter__'):
maxim=max(abs(np.array(intensity_range)))
intensity_range=maxim
elif intensity_range is None:
intensity_range=ds.abs().max().max()
intensities=(2**np.arange(-6.,4,1.))*intensity_range
window_difference=np.abs(shown_window[1]-shown_window[0])*0.1
timelimits=shown_window+np.asarray([-window_difference,window_difference])
for repeat in range(30):
fig,ax=plt.subplots()
ax = plot2d(ax = ax, cmap = cmap, ds = ds, wave_nm_bin = wave_nm_bin, scattercut = scattercut, bordercut = bordercut,
timelimits = timelimits, intensity_range = intensity_range,
title = 'select intensity where we can work,\n if happy, choose confirm or abort',
use_colorbar = False, plot_type = "linear", log_scale = False)
w=(ax.get_xlim()[1]-ax.get_xlim()[0])
ax.add_patch(matplotlib.patches.Rectangle((ax.get_xlim()[0],shown_window[1]),w,0.5,facecolor='white'))
for i,ent in enumerate(intensities):
ax.text(ax.get_xlim()[0]+i*w/10.,shown_window[1],'%.1g'%(2**np.arange(-6.,4,1.))[i],fontsize=8)
ax.add_patch( matplotlib.patches.Rectangle((ax.get_xlim()[0],timelimits[0]),w/4.,0.2,facecolor='white'))
ax.text(ax.get_xlim()[0],timelimits[0],'Accept',fontsize=15)
ax.add_patch(matplotlib.patches.Rectangle((ax.get_xlim()[0]+w*3./4.,timelimits[0]),w/4.,0.2,facecolor='white'))
ax.text(ax.get_xlim()[0]+w*3./4.,timelimits[0],'Cancel all',fontsize=15)
choice =plt.ginput(1,timeout=15)
factor=int((choice[0][0]-ax.get_xlim()[0])/(w/10.))
if choice[0][1] > shown_window[1]/2:
intensity_range=intensities[factor]
print((2**np.arange(-6.,4,1.))[factor])
intensity_range=[-intensity_range,intensity_range]
plt.close(fig)
continue
elif choice[0][1] < shown_window[0]/2.:#we choice to finish the choices
if choice[0][0] < ax.get_xlim()[0]+w/2:#
print('accept')
plt.close(fig)
break
else:
plt.close(fig)
return False
else:
print('click better please')
plt.close(fig)
continue
for repeat in range(10):
fig,ax=plt.subplots()
ax = plot2d(ax = ax, cmap = cmap, ds = ds, wave_nm_bin = wave_nm_bin, scattercut = scattercut, bordercut = bordercut,
timelimits = shown_window, intensity_range = intensity_range,
title = 'select points, rightclick = remove last, \n middle click (or both at once finishes ',
use_colorbar = False, plot_type = "linear", log_scale = False)
polypts=np.asarray(plt.ginput(n=max_points,timeout=300, show_clicks=True,mouse_add=1, mouse_pop=3, mouse_stop=2))
plt.close(fig)
fig,ax=plt.subplots()
ax = plot2d(ax = ax, ds = ds, cmap = cmap, wave_nm_bin = wave_nm_bin, scattercut = scattercut, bordercut = bordercut,
timelimits = shown_window, intensity_range = intensity_range,
title = 'like it? %i more attempts'%(9-repeat), use_colorbar = False,
plot_type = "linear", log_scale = False)
#Fit a 4th order polynomial; np.polyfit returns the coefficients from the highest power of x down to the constant
fitcoeff= np.polyfit(polypts[:, 0], polypts[:, 1], 4, full=False)
correcttimeval = np.polyval(fitcoeff, ds.columns.values.astype('float'))
ax.plot(ds.columns.values.astype('float'),correcttimeval)
ax.add_patch( matplotlib.patches.Rectangle((ax.get_xlim()[0],ax.get_ylim()[0]),w/4,0.2,facecolor='white'))
ax.text(ax.get_xlim()[0],ax.get_ylim()[0]+0.05,'Save',fontsize=20)
ax.add_patch(matplotlib.patches.Rectangle((ax.get_xlim()[0]+w*3/4,ax.get_ylim()[0]),w/4,0.2,facecolor='white'))
ax.text(ax.get_xlim()[0]+w*3/4,ax.get_ylim()[0]+0.05,'Redo',fontsize=20)
satisfied =plt.ginput(1)
plt.close(fig)
if satisfied[0][0] < ax.get_xlim()[0]+w/2:
print('accepted')
plt.close(fig)
break
elif repeat<8:
plt.close(fig)
continue
else:
plt.close(fig)
return False
#stdev = sum(residuals**2)/8
else:
with open(save_file,'r') as f:
fitcoeff=f.readline()
fitcoeff=np.array(fitcoeff.split(','),dtype='float')
if len(fitcoeff)==6:#old style params
fitcoeff[-2]-=fitcoeff[-1]
fitcoeff=fitcoeff[:5]
time=ds.index.values.astype('float')#extract the time
ds_new=ds.apply(lambda x:np.interp(x=time+np.polyval(fitcoeff,float(x.name)),xp=time,fp=x),axis=0,raw=False)
if save_file is None:
#finding where zero time is
for repeat in range(10):
fig,ax=plt.subplots()
ax = plot2d(ax = ax, cmap = cmap, ds = ds_new, wave_nm_bin = wave_nm_bin, scattercut = scattercut, bordercut = bordercut,
lintresh = np.max(timelimits), timelimits = timelimits, intensity_range = intensity_range,
title = 'uncorrected select new zero', plot_type = 'lin', use_colorbar = False, log_scale = False)
ax.plot(ax.get_xlim(),[0,0],'black',lw=0.5)
fittingto = np.array(plt.ginput(1)[0])[1]
print(fittingto)
fitcoeff[-1]+=fittingto
ds_new=ds.apply(lambda x:np.interp(x=time+np.polyval(fitcoeff,float(x.name)),xp=time,fp=x),axis=0,raw=False)
plt.close(fig)
fig,ax=plt.subplots()
ax = plot2d(ax = ax, ds = ds_new, cmap = cmap, wave_nm_bin = wave_nm_bin, scattercut = scattercut, bordercut = bordercut,
lintresh = np.max(timelimits), timelimits = timelimits, intensity_range = intensity_range,
title = 'corrected, please select', plot_type = 'lin', use_colorbar = False, log_scale = False)
ax.plot(ax.get_xlim(),[0,0],'black',lw=0.5)
w=(ax.get_xlim()[1]-ax.get_xlim()[0])
ax.add_patch( matplotlib.patches.Rectangle((ax.get_xlim()[0],ax.get_ylim()[0]),w/4,0.2,facecolor='white'))
ax.text(ax.get_xlim()[0],ax.get_ylim()[0]+0.05,'Save',fontsize=30)
ax.add_patch(matplotlib.patches.Rectangle((ax.get_xlim()[0]+w*3/4,ax.get_ylim()[0]),w/4,0.2,facecolor='white'))
ax.text(ax.get_xlim()[0]+w*3/4,ax.get_ylim()[0]+0.05,'Redo',fontsize=30)
satisfied =plt.ginput(1)
if satisfied[0][0] < ax.get_xlim()[0]+w/2:
print('accepted')
plt.close(fig)
break
elif repeat<8:
plt.close(fig)
continue
else:
plt.close(fig)
return False
print(fitcoeff)
if filename is None:
f='chirp.dat'
else:
f=filename.split('.')[0]
f=f+'_chirp' + '.dat'
if path is None:
with open(f, 'w') as opened_file:
opened_file.write(','.join(map(str,np.array(fitcoeff))))
else:
with open(check_folder(path=path,filename=f), 'w') as opened_file:
opened_file.write(','.join(map(str,np.array(fitcoeff))))
return ds_new
def build_c(times, mod = 'paral', pardf = None, sub_steps = 10):
'''
Build the concentration matrix according to the chosen model. The parameter
'resolution' is the width of the rise time (at sigma 50% intensity).
This function can also be used to create illustration dynamics.
The parallel decays are created explicitly, while the consecutive decays are
created by sampling the populations at the times given in the first vector and
evaluating the progression at a number of substeps defined by sub_steps (10 by default)
Parameters
-----------
times : np.array
array with the times at which the dataframe should be generated. In general the
experimental times
mod : str, optional
this selects the model that is used to generate the concentrations.
1. 'paral' (Default) or 'exponential' both are equivalent
2. 'consecutive' or 'full_consecutive'
In 2 the 'consecutive' and 'full_consecutive' are different in that for 'consecutive'
the optimization is done using 'exponential' (as it should give the same times)
and then only in the last (final) iteration the full consecutive differential
equation is used. This has significant speed advantages, but can lead to errors, particularly
for very fast times.
sub_steps : int, optional
defines how many times the iterative loop (used in consecutive only) is sampling the concentrations
between the times given in "times"
pardf : pd.DataFrame
This dataframe must contain the parameter that are used for creating the dynamics
the parameter must be named with the index.
For the internal functions this must contain these keys:
* 't0' = zero time, mandatory
* 'resolution' = instrument response function, mandatory
* 'background',optional = if this keyword is present a flat constant background is created (=1 over the whole time)
* 'infinite',optional = if this keyword is present a new non decaying component is formed with the last decay time.
* 'k0,k1,...' = entries with increasing integers are taken as decay rates. The number of these components determines how many shall be generated.
Examples
---------
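A minimal sketch that builds and plots the concentration profiles of a consecutive model
with two decays (all parameter values are assumptions chosen for illustration):
import numpy as np
import pandas
times = np.linspace(-1, 100, 500)
pardf = pandas.DataFrame({'value': [0.5, 0.05, 0.0, 0.1],
'is_rate': [True, True, False, False]},
index = ['k0', 'k1', 't0', 'resolution'])
c = build_c(times, mod = 'consecutive', pardf = pardf)
c.plot()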
'''
choices = {'paral':0,'exponential':0,'consecutive':1,'full_consecutive':1}
model=choices[mod]
param=pardf.loc[pardf.is_rate,'value'].values.astype(float)
t0=float(pardf.loc['t0','value'])
resolution=float(pardf.loc['resolution','value'])
if model == 0:#parallel
c=np.exp(-1*np.tile(times-t0,(len(param),1)).T*param)
c[(times-t0)<0]=1
c*=np.tile(rise(x=times,sigma=resolution,begin=t0),(len(param),1)).T
c=pandas.DataFrame(c,index=times)
c.index.name='time'
if 'background' in list(pardf.index.values):
c['background']=1
if 'infinite' in list(pardf.index.values):
c['infinite']=rise(x=times,sigma=resolution,begin=t0)
if model == 1:#consecutive decays
n_decays=len(param)
if 'infinite' in list(pardf.index.values):
infinite=True
n_decays+=1
else:
infinite=False
decays=param
c=np.zeros((len(times),n_decays),dtype='float')
g=gauss(times,sigma=resolution/FWHM,mu=t0)
for i in range(1,len(times)):
dc=np.zeros((n_decays,1),dtype='float')
dt=(times[i]-times[i-1])/(sub_steps)
c_temp=c[i-1,:]
for j in range(int(sub_steps)):
for l in range(0,n_decays):
if l>0:
if infinite:
if l<(n_decays-1):
dc[l]=decays[l-1]*dt*c_temp[l-1]-decays[l]*dt*c_temp[l]
else:
dc[l]=decays[l-1]*dt*c_temp[l-1]
else:
dc[l]=decays[l-1]*dt*c_temp[l-1]-decays[l]*dt*c_temp[l]
else:
if infinite and n_decays==1:
dc[l]=g[i]*dt
else:
dc[l]=g[i]*dt-decays[l]*dt*c_temp[l]
for b in range(c.shape[1]):
c_temp[b] =np.nanmax([(c_temp[b]+float(dc[b])),0.])
c[i,:] =c_temp
c=pandas.DataFrame(c,index=times)
c.index.name='time'
if infinite:
labels=list(c.columns.values)
labels[-1]='Non Decaying'
c.columns=labels
if 'background' in list(pardf.index.values):
c['background']=1
else:
if 'background' in list(pardf.index.values):
c['background']=1
return c
def fill_int(ds,c,final=True,baseunit='ps',return_shapes=False):
'''Solves for the spectral intensities in a linear-algebra way. It takes the target dataframe and the concentration frame,
prepares the matrices and then tries to solve this equation system using
eps=np.linalg.lstsq(AA,Af,rcond=-1)[0]
If this fails it returns a dictionary with 1000 as error (only entry). If successful
it returns a dictionary that contains the
fit_error = (AE**2).sum() with AE being the difference of the measured and the calculated matrix
Parameters
-----------
ds : DataFrame
DataFrame to be fitted
c : DataFrame
DataFrame containing the concentration matrix (the concentrations with the times as index).
Each different species has a column with the species name as column name
final : bool,optional
if True (Default) the complete solutions will be attached otherwise only the error is attached
baseunit : str,optional
this string is used as unit for the time axis
return_shapes : bool,optional
Default = False, if True, then the concentrations and spectra are added to the re (even if not final)
Returns
------------------
re : dict
the dictionary "re" attached to the object containing all the matrixes and parameter.
if "final" is True:
* "A" Shaped measured Matrix
* "AC" Shaped calculated Matrix
* "AE" Difference between A and AC = linear error
* "DAC" DAS or SAS, labeled after the names given in the function (the columns of c) Care must be taken that this mesured intensity is C * DAS, the product. For exponential model the concentrations are normalized
* "c" The Concentrations (meaning the evolution of the concentrations over time. Care must be taken that this mesured intensity is C * DAS, the product. For exponential model the concentrations are normalized
* "error" is the S2, meaning AE**2.sum().sum()
else:
* "error" is the S2, meaning AE**2.sum()
'''
time=ds.index.values.astype('float')
wl=ds.columns.values.astype('float')
time_label=ds.index.name
energy_label=ds.columns.name
A=ds.values
er=c.values
ert = er.T
AA = np.matmul(ert,er)
Af = np.matmul(ert,A)
try:
eps=np.linalg.lstsq(AA,Af,rcond=-1)[0]
except:
re={'error':1000}
return re
eps[np.isnan(eps)]=0
eps[np.isinf(eps)]=0
AC = np.matmul(er,eps);
AE = A-AC;
fit_error = (AE**2).sum()
if final:
A=pandas.DataFrame(A,index=time,columns=wl)
AC=pandas.DataFrame(AC,index=time,columns=wl)
AE=pandas.DataFrame(AE,index=time,columns=wl)
DAC=pandas.DataFrame(eps.T,index=wl)
A.index.name=time_label
A.columns.name=energy_label
AC.index.name=time_label
AC.columns.name=energy_label
AE.index.name=time_label
AE.columns.name=energy_label
DAC.index.name=energy_label
re={'A':A,'AC':AC,'AE':AE,'DAC':DAC,'error':fit_error,'c':c}
elif return_shapes:
DAC=pandas.DataFrame(eps.T,index=wl) #the spectra are otherwise only built in the final branch
DAC.index.name=energy_label
re={'DAC':DAC,'error':fit_error,'c':c}
else:
re={'error':fit_error}
return re
def err_func(paras, ds, mod = 'paral', final = False, log_fit = False, dump_paras = False, filename = None, ext_spectra = None, dump_shapes = False):
'''function that calculates and returns the error for the global fit. This function is intended for
fitting a single dataset.
Parameters
--------------
ds : DataFrame
This dataframe contains the data to be fitted. This has to be shaped as it is intended to be used (so all shaping parameters
are already applied). The dataframe expects the time to be in the index and the wavelength/energy
to be in the columns.
paras : lmfit parameter object
The parameter object that defines what is calculated
mod : str or function, optional
The model selection is depending if it is an internal or external model.
The internal functions are triggered by calling their name
Two main are currently implemented
1. 'paral' (Default) or 'exponential'
2. 'consecutive' or 'full_consecutive'
In 2 the 'consecutive' and 'full_consecutive' are different in that for 'consecutive'
the optimization is done using 'exponential' (as it should give the same times)
and then only in the last (final) iteration the full consecutive differential
equation is used. This has significant speed advantages, but can lead to errors, particularly
for very fast times.
As external model a function is handed to this parameter; this function
must accept the times and a parameter DataFrame and return a DataFrame
with the concentrations (similar to build_c)
for the internal functions:
This dataframe must contain the parameters that are used for creating the dynamics;
the parameters must be named with the index.
't0' = zero time, mandatory
'resolution' = instrument response function, mandatory
'background',optional = if this keyword is present a flat constant background is created (=1 over the whole time)
'infinite',optional = if this keyword is present a new non decaying component is formed with the last decay time.
'k0,k1,...' = entries with increasing integers are taken as decay rates. The number of these components determines how many shall be generated.
final : bool, optional
this switch decides if just the squared error is returned (for False) (Default) or if the full
matrices, including the r2, are returned.
log_fit : bool, optional
if False (Default) then the parameters are handed to the fitting function as they are, if True
then all times are first converted to log space.
dump_paras : bool, optional
(Default) is False. If True creates two files in the working folder, one with the
currently used parameters created at the end of each optimisation step, and one with
the set of parameters that up to now gave the lowest error. Intended to store
the optimisation results if the fit needs to be interrupted
(if e.g. Ampgo simply needs too long to optimize). Useful option if things are slow
filename : None or str, optional
Only used in conjunction with 'dump_paras'. The program uses this filename to dump the
parameter to disk
ext_spectra : DataFrame, optional
(Default) is None. If given, subtract this spectrum from the data matrix using the intensity
given in "C(t)". This function will only work for external models. The name of the spectral column
must be the same as the name of the column used. If not, the spectrum will be ignored. The spectrum will
be interpolated to the spectral points of the model ds before the subtraction.
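dump_shapes : bool, optional
(Default) is False. If True the calculated concentrations and spectra are additionally
written to disk (using 'filename' as prefix).
Examples
---------
A minimal sketch of a single evaluation with the internal exponential model (the shaped
dataset my_shaped_ds is an assumption, typically prepared with sub_ds from ta.ds):
import lmfit
paras = lmfit.Parameters()
paras.add('k0', value = 1/10., vary = True)      # one decay rate
paras.add('t0', value = 0, vary = False)
paras.add('resolution', value = 0.1, vary = False)
error = err_func(paras, my_shaped_ds, mod = 'exponential')
results = err_func(paras, my_shaped_ds, mod = 'exponential', final = True)   # full result dictionary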
'''
time_label=ds.index.name
energy_label=ds.columns.name
pardf=par_to_pardf(paras)
if log_fit:
pardf.loc[pardf.is_rate,'value']=pardf.loc[pardf.is_rate,'value'].apply(lambda x: 10**x)
if isinstance(mod,type('hello')):#did we use a build in model?
if final:#for final we really want the model
c=build_c(times=ds.index.values.astype('float'),mod=mod,pardf=pardf)
elif 'full_consecutive' in mod:# here we force the full consecutive modelling
c=build_c(times=ds.index.values.astype('float'),mod=mod,pardf=pardf)
else:#here we "bypass" the full consecutive and optimize the rates with the decays
c=build_c(times=ds.index.values.astype('float'),mod='paral',pardf=pardf)
c.index.name=time_label
if ext_spectra is None:
re=fill_int(ds=ds,c=c, return_shapes = dump_shapes)
else:
if 'ext_spectra_shift' in list(pardf.index.values):
ext_spectra.index=ext_spectra.index.values+pardf.loc['ext_spectra_shift','value']
ext_spectra=rebin(ext_spectra,ds.columns.values.astype(float))
else:
ext_spectra=rebin(ext_spectra,ds.columns.values.astype(float))
if "ext_spectra_scale" in list(pardf.index.values):
ext_spectra=ext_spectra*pardf.loc['ext_spectra_scale','value']
c_temp=c.copy()
for col in ext_spectra.columns:
A,B=np.meshgrid(c.loc[:,col].values,ext_spectra.loc[:,col].values)
C=pandas.DataFrame((A*B).T,index=c.index,columns=ext_spectra.index.values)
ds=ds-C
c_temp.drop(col,axis=1,inplace=True)
re=fill_int(ds=ds,c=c_temp, return_shapes = dump_shapes)
if final:
labels=list(re['DAC'].columns.values)
changed=True
if 'background' in list(pardf.index.values):
if 'infinite' in list(pardf.index.values):
labels[-1]='Non Decaying'
labels[-2]='background'
else:
labels[-1]='background'
else:
if 'infinite' in list(pardf.index.values):
labels[-1]='Non Decaying'
else:changed=False
if changed:
re['DAC'].columns=labels
re['c'].columns=labels
if not ext_spectra is None:
for col in ext_spectra.columns:
re['DAC'][col]=ext_spectra.loc[:,col].values
re['c'][col]=c.loc[:,col].values
A,B=np.meshgrid(c.loc[:,col].values,ext_spectra.loc[:,col].values)
C=pandas.DataFrame((A*B).T,index=c.index,columns=ext_spectra.index.values)
re['A']=re['A']+C
re['AC']=re['AC']+C
re['r2']=1-re['error']/((re['A']-re['A'].mean().mean())**2).sum().sum()
if dump_paras:
try:
pardf.loc['error','value']=re['error']
except:
pass
try:
pardf.loc['r2','value']=re['r2']
except:
pass
try:
if filename is None:
store_name='minimal_dump_paras.par'
else:
store_name='minimal_dump_paras_%s.par'%filename
min_df=pandas.read_csv(store_name,sep=',',header=None,skiprows=1)
if float(min_df.iloc[-1,1])>float(re['error']):
pardf.to_csv(store_name)
except:
pass
if filename is None:
store_name='dump_paras.par'
else:
store_name='dump_paras_%s.par'%filename
pardf.to_csv(store_name)
return re
else:
if dump_paras:
try:
pardf.loc['error','value']=re['error']
except:
pass
try:
pardf.loc['r2','value']=re['r2']
except:
pass
try:
if filename is None:
store_name='minimal_dump_paras.par'
else:
store_name='minimal_dump_paras_%s.par'%filename
min_df=pandas.read_csv(store_name,sep=',',header=None,skiprows=1)
if float(min_df.iloc[-1,1])>float(re['error']):
pardf.to_csv(store_name)
except:
pass
if filename is None:
store_name='dump_paras.par'
else:
store_name='dump_paras_%s.par'%filename
pardf.to_csv(store_name)
if not mod in ['paral','exponential','consecutive']:
print(re['error'])
if dump_shapes:
re['c'].to_csv(path_or_buf=filename + '_c')
re['DAC'].to_csv(path_or_buf=filename + '_DAC')
return re['error']
else:
c=mod(times=ds.index.values.astype('float'),pardf=pardf.loc[:,'value'])
if ext_spectra is None:
re=fill_int(ds=ds,c=c, return_shapes = dump_shapes)
else:
ext_spectra=rebin(ext_spectra,ds.columns.values.astype(float))
c_temp=c.copy()
for col in ext_spectra.columns:
A,B=np.meshgrid(c.loc[:,col].values,ext_spectra.loc[:,col].values)
C=pandas.DataFrame((A*B).T,index=c.index,columns=ext_spectra.index.values)
ds=ds-C
c_temp.drop(col,axis=1,inplace=True)
re=fill_int(ds=ds,c=c_temp, return_shapes = dump_shapes)
if final:
if len(re.keys())<3:#
print('error in the calculation')
return re
if ext_spectra is None:
re['DAC'].columns=c.columns.values
re['c'].columns=c.columns.values
else:
re['DAC'].columns=c_temp.columns.values
re['c'].columns=c_temp.columns.values
for col in ext_spectra.columns:
re['DAC'][col]=ext_spectra.loc[:,col].values
re['c'][col]=c.loc[:,col].values
A,B=np.meshgrid(c.loc[:,col].values,ext_spectra.loc[:,col].values)
C=pandas.DataFrame((A*B).T,index=c.index,columns=ext_spectra.index.values)
re['A']=re['A']+C
re['AC']=re['AC']+C
re['r2']=1-re['error']/((re['A']-re['A'].mean().mean())**2).sum().sum()
if dump_paras:
try:
pardf.loc['error','value']=re['error']
except:
pass
try:
min_df=pandas.read_csv('minimal_dump_paras.par',sep=',',header=None,skiprows=1)
if float(min_df.iloc[-1,1])>float(re['error']):
pardf.to_csv('minimal_dump_paras.par')
except:
pass
pardf.to_csv('dump_paras.par')
return re
else:
if dump_paras:
try:
pardf.loc['error','value']=re['error']
except:
pass
try:
min_df=pandas.read_csv('minimal_dump_paras.par',sep=',',header=None,skiprows=1)
if float(min_df.iloc[-1,1])>float(re['error']):
pardf.to_csv('minimal_dump_paras.par')
except:
pass
pardf.to_csv('dump_paras.par')
print(re['error'])
if dump_shapes:
re['c'].to_csv(path_or_buf=filename + '_c')
re['DAC'].to_csv(path_or_buf=filename + '_DAC')
return re['error']
def err_func_multi(paras, mod = 'paral', final = False, log_fit = False, multi_project = None,
unique_parameter = None, weights = None, dump_paras = False, filename = None,
ext_spectra = None, dump_shapes = False, same_DAS = False):
'''function that calculates and returns the error for the global fit. This function is intended for
fitting of multiple datasets
Parameters
--------------
paras : lmfit parameter object
The parameter object that defines what is calculated
mod : str or function, optional
The model selection is depending if it is an internal or external model.
The internal functions are triggered by calling their name
Two main are currently implemented
1. 'paral' (Default) or 'exponential'
2. 'consecutive' or 'full_consecutive'
In 2 the 'consecutive' and 'full_consecutive' are different in that for 'consecutive'
the optimization is done using 'exponential' (as it should give the same times)
and then only in the last (final) iteration the full consecutive differential
equation is used. This has significant speed advantages, but can lead to errors, particularly
for very fast times.
for the internal functions:
This dataframe must contain the parameters that are used for creating the dynamics;
the parameters must be named with the index.
't0' = zero time, mandatory
'resolution' = instrument response function, mandatory
'background',optional = if this keyword is present a flat constant background is created (=1 over the whole time)
'infinite',optional = if this keyword is present a new non decaying component is formed with the last decay time.
'k0,k1,...' = entries with increasing integers are taken as decay rates. The number of these components determines how many shall be generated.
As external model a function is handed to this parameter; this function
must accept the times and a parameter DataFrame and return a DataFrame
with the concentrations (similar to build_c)
final : bool, optional
this switch decides if just the squared error is returned (for False) (Default) or if the full
matrices, including the r2, are returned.
log_fit : bool, optional
if False (Default) then the parameter are handed to the fitting function as they are, if true
then all times are first converted to log space.
dump_paras : bool, optional
(Default) is False. If True creates two files in the working folder, one with the
currently used parameters created at the end of each optimisation step, and one with
the set of parameters that up to now gave the lowest error. Intended to store
the optimisation results if the fit needs to be interrupted
(if e.g. Ampgo simply needs too long to optimize). Useful option if things are slow
filename : None or str, optional
Only used in conjunction with 'dump_paras'. The program uses this filename to dump the
parameter to disk
multi_project : None or list (of TA projects), optional
This switch is triggering the simultaneous optimisation of multiple datasets.
multi_project is (Default) None. It expects an iterable (typically list) with other
TA projects (like ta) that are then optimised with the same parameter.
This means that all projects get the same parameter object for each iteration
of the fit and return their individual error, which is summed linearly.
The "weights" option allows to give each multi_project a specific weight (number)
that is multiplied to the error. If the weight object has the same number of items
as the multi_project it is assumed that the triggering object (the embedded project)
has the weight of 1, otherwise the first weight is for the embedded project.
The option 'unique_parameter' takes (a list) of parameter that are not
to be shared between the projects (and that are not optimized either)
The intended use of this is to give e.g. the pump power for multiple experiments to
study non linear behaviour. Returned will be only the parameter set for the optimum
combination of all parameters. Internally, we iterate through the projects and calculate
for each project the error for each iteration. Important to note is that currently this
means that each DAS/SAS is calculated independently! For performing the same calculation
with a single DAS, the matrices need to be concatenated before the run and an external
function used to create a combined model. As this is very difficult to implement reliably,
for general use (think e.g. different pump wavelengths) this has to be done manually.
unique_parameter : None or str or list (of strings), optional
only used in conjunction with 'multi_project', it takes (a list) of parameter that
are not to be shared between the projects (and that are not optimized either)
The intended use of this is to give e.g. the pump power for multiple experiments
to study non linear behaviour. (Default) None
same_DAS : bool,optional
changes the fit behavior and uses the same DAS for the optimization. This means that the ds are stacked before the fill_int rounds
weights : list of floats, optional
only used in conjunction with 'multi_project'. The "weights" option allows to
give each multi\_project a specific weight (number) that is multiplied to the error.
If the weight object has the same number of items as the 'multi_project' it is assumed
that ta (the embedded project) has the weight of 1, otherwise the first weight is for the
embedded object
ext_spectra : DataFrame, optional
(Default) is None. If given, subtract this spectrum from the data matrix using the intensity
given in "C(t)". This function will only work for external models. The name of the spectral column
must be the same as the name of the column used. If not, the spectrum will be ignored. The spectrum will
be interpolated to the spectral points of the model ds before the subtraction.
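dump_shapes : bool, optional
(Default) is False. If True the calculated concentrations and spectra of each project are
additionally written to disk.
Examples
---------
A minimal sketch of a simultaneous evaluation over several loaded projects (ta1, ta2 and the
parameter object paras are assumptions; typically this function is called by the global
fitting routine of the TA class):
error = err_func_multi(paras, mod = 'exponential', multi_project = [ta1, ta2])
results = err_func_multi(paras, mod = 'exponential', multi_project = [ta1, ta2], final = True)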
'''
pardf_changing=par_to_pardf(paras)
error_listen=[]
r2_listen=[]
slice_setting_object=multi_project[0].Copy()
####### new same DAS. I'm lazy and will duplicate the loop. ###########
if same_DAS:
c_stack=[]
ds_stack=[]
par_stack=[]
height_stack=[]
for i,ta in enumerate(multi_project):
ds = sub_ds(ds = ta.ds, scattercut = slice_setting_object.scattercut, bordercut = slice_setting_object.bordercut,
timelimits = slice_setting_object.timelimits, wave_nm_bin = slice_setting_object.wave_nm_bin,
time_bin = slice_setting_object.time_bin, ignore_time_region = slice_setting_object.ignore_time_region)
pardf=pardf_changing.copy()
try:#let's see if the project has a parameter object
pardf_ori=par_to_pardf(ta.par)
except:
pardf_ori=pardf
if unique_parameter is not None:
for key in unique_parameter:
pardf.loc[key,'value']=pardf_ori.loc[key,'value']
par_stack.append(pardf)
if log_fit:
pardf.loc[pardf.is_rate,'value']=pardf.loc[pardf.is_rate,'value'].apply(lambda x: 10**x)
if isinstance(mod,type('hello')):#did we use a build in model?
c=build_c(times=ds.index.values.astype('float'),mod=mod,pardf=pardf)
else:
c=mod(times=ds.index.values.astype('float'),pardf=pardf.loc[:,'value'])
if ext_spectra is None:
c_temp=c.copy()
else:
if 'ext_spectra_shift' in list(pardf.index.values):
ext_spectra.index=ext_spectra.index.values+pardf.loc['ext_spectra_shift','value']
ext_spectra=rebin(ext_spectra,ds.columns.values.astype(float))
else:
ext_spectra=rebin(ext_spectra,ds.columns.values.astype(float))
if "ext_spectra_scale" in list(pardf.index.values):
ext_spectra=ext_spectra*pardf.loc['ext_spectra_scale','value']
c_temp=c.copy()
for col in ext_spectra.columns:
A,B=np.meshgrid(c.loc[:,col].values,ext_spectra.loc[:,col].values)
C=pandas.DataFrame((A*B).T,index=c.index,columns=ext_spectra.index.values)
ds=ds-C
c_temp.drop(col,axis=1,inplace=True)
if not weights is None:
if len(weights)==len(multi_project)-1:
weights=list(weights)
weights.insert(0,1)
elif len(weights)!=len(multi_project):
Ex = ValueError()
Ex.strerror='The number of entries in the list must either be the number of all elements (including \"TA\") or the number of elements in the other projects. In the latter case the element ta gets the weight=1'
raise Ex
ds_stack.append(ds*weights[i])
else:
ds_stack.append(ds)
c_stack.append(c_temp)
height_stack.append(len(c_temp.index.values))
A_con=pandas.concat(ds_stack)
c_con=pandas.concat(c_stack)
re=fill_int(ds=A_con,c=c_con, return_shapes = dump_shapes, final =final)
if dump_paras:
try:
pardf.loc['error','value']=re['error']
except:
pass
if final:
try:
pardf.loc['r2','value']=1-re['error']/((re['A']-re['A'].mean().mean())**2).sum().sum()
except:
pass
try:
if filename is None:
store_name='minimal_dump_paras.par'
else:
store_name='minimal_dump_paras_%s.par'%filename
min_df=pandas.read_csv(store_name,sep=',',header=None,skiprows=1)
if float(min_df.iloc[-1,1])>float(re['error']):
pardf.to_csv(store_name)
except:
pass
if filename is None:
store_name='dump_paras.par'
else:
store_name='dump_paras_%s.par'%filename
try:
pardf.to_csv(store_name)
except:
print('Saving of %s failed'%store_name)
if final:
if isinstance(mod,type('hello')):#did we use a build in model?
labels=list(re['DAC'].columns.values)
changed=True
if 'background' in list(pardf.index.values):
if 'infinite' in list(pardf.index.values):
labels[-1]='Non Decaying'
labels[-2]='background'
else:
labels[-1]='background'
else:
if 'infinite' in list(pardf.index.values):
labels[-1]='Non Decaying'
else:changed=False
if changed:
re['DAC'].columns=labels
re['c'].columns=labels
if not ext_spectra is None:
for col in ext_spectra.columns:
re['DAC'][col]=ext_spectra.loc[:,col].values
re['c'][col]=c.loc[:,col].values
A,B=np.meshgrid(c.loc[:,col].values,ext_spectra.loc[:,col].values)
C=pandas.DataFrame((A*B).T,index=c.index,columns=ext_spectra.index.values)
re['A']=re['A']+C
re['AC']=re['AC']+C
else:
re['DAC'].columns=c.columns.values
re['c'].columns=c.columns.values
if not ext_spectra is None:
for col in ext_spectra.columns:
re['DAC'][col]=ext_spectra.loc[:,col].values
re['c'][col]=c.loc[:,col].values
A,B=np.meshgrid(c.loc[:,col].values,ext_spectra.loc[:,col].values)
C=pandas.DataFrame((A*B).T,index=c.index,columns=ext_spectra.index.values)
re['A']=re['A']+C
re['AC']=re['AC']+C
return_listen=[]
for i,ta in enumerate(multi_project):
re_local={}
if i==0:
lower=0
else:
lower=np.array(height_stack)[:i].sum()
re_local['A']=re['A'].copy().iloc[lower:lower+height_stack[i],:]
re_local['AC']=re['AC'].copy().iloc[lower:lower+height_stack[i],:]
re_local['AE']=re['AE'].copy().iloc[lower:lower+height_stack[i],:]
re_local['c']=re['c'].copy().iloc[lower:lower+height_stack[i],:]
re_local['error_total']=re['error']
re_local['error']=(re_local['AE']**2).sum().sum()
re_local['DAC']=re['DAC'].copy()
re_local['r2']=1-re_local['error']/((re_local['A']-re_local['A'].mean().mean())**2).sum().sum()
re_local['r2_total']=1-re['error']/((re['A']-re['A'].mean().mean())**2).sum().sum()
re_local['pardf']=par_stack[i]
try:
re_local['filename']=filename
except:
pass
return_listen.append(re_local)
if not mod in ['paral','exponential','consecutive']:
print(re['error'])
if final:
return return_listen
else:
return re['error']
################### not same DAS####################
else:
for i,ta in enumerate(multi_project):
ds = sub_ds(ds = ta.ds, scattercut = slice_setting_object.scattercut, bordercut = slice_setting_object.bordercut,
timelimits = slice_setting_object.timelimits, wave_nm_bin = slice_setting_object.wave_nm_bin,
time_bin = slice_setting_object.time_bin, ignore_time_region = slice_setting_object.ignore_time_region)
pardf=pardf_changing.copy()
try:#let's see if the project has a parameter object
pardf_ori=par_to_pardf(ta.par)
except:
pardf_ori=pardf
if unique_parameter is not None:
for key in unique_parameter:
pardf.loc[key,'value']=pardf_ori.loc[key,'value']
if log_fit:
pardf.loc[pardf.is_rate,'value']=pardf.loc[pardf.is_rate,'value'].apply(lambda x: 10**x)
if isinstance(mod,type('hello')):#did we use a build in model?
c=build_c(times=ds.index.values.astype('float'),mod=mod,pardf=pardf)
if ext_spectra is None:
re=fill_int(ds=ds,c=c, return_shapes = dump_shapes)
else:
ext_spectra=rebin(ext_spectra,ds.columns.values.astype(float))
c_temp=c.copy()
for col in ext_spectra.columns:
A,B=np.meshgrid(c.loc[:,col].values,ext_spectra.loc[:,col].values)
C=pandas.DataFrame((A*B).T,index=c.index,columns=ext_spectra.index.values)
ds=ds-C
c_temp.drop(col,axis=1,inplace=True)
re=fill_int(ds=ds,c=c_temp, return_shapes = dump_shapes)
if final:
if i==0:
labels=list(re['DAC'].columns.values)
changed=True
if 'background' in list(pardf.index.values):
if 'infinite' in list(pardf.index.values):
labels[-1]='Non Decaying'
labels[-2]='background'
else:
labels[-1]='background'
else:
if 'infinite' in list(pardf.index.values):
labels[-1]='Non Decaying'
else:changed=False
if changed:
re['DAC'].columns=labels
re['c'].columns=labels
if not ext_spectra is None:
for col in ext_spectra.columns:
re['DAC'][col]=ext_spectra.loc[:,col].values
re['c'][col]=c.loc[:,col].values
A,B=np.meshgrid(c.loc[:,col].values,ext_spectra.loc[:,col].values)
C=pandas.DataFrame((A*B).T,index=c.index,columns=ext_spectra.index.values)
re['A']=re['A']+C
re['AC']=re['AC']+C
re_final=re.copy()
error_listen.append(re['error'])
r2_listen.append(1-re['error']/((re['A']-re['A'].mean().mean())**2).sum().sum())
else:
if dump_shapes:
re['c'].to_csv(path_or_buf=ta.filename + '_c')
re['DAC'].to_csv(path_or_buf=ta.filename + '_DAC')
error_listen.append(re['error'])
else:
c=mod(times=ds.index.values.astype('float'),pardf=pardf.loc[:,'value'])
if ext_spectra is None:
re=fill_int(ds=ds,c=c, return_shapes = dump_shapes)
else:
ext_spectra=rebin(ext_spectra,ds.columns.values.astype(float))
c_temp=c.copy()
for col in ext_spectra.columns:
A,B=np.meshgrid(c.loc[:,col].values,ext_spectra.loc[:,col].values)
C=pandas.DataFrame((A*B).T,index=c.index,columns=ext_spectra.index.values)
ds=ds-C
c_temp.drop(col,axis=1,inplace=True)
re=fill_int(ds=ds,c=c_temp, return_shapes = dump_shapes)
if final:
if i==0:
re['DAC'].columns=c.columns.values
re['c'].columns=c.columns.values
if not ext_spectra is None:
for col in ext_spectra.columns:
re['DAC'][col]=ext_spectra.loc[:,col].values
re['c'][col]=c.loc[:,col].values
A,B=np.meshgrid(c.loc[:,col].values,ext_spectra.loc[:,col].values)
C=pandas.DataFrame((A*B).T,index=c.index,columns=ext_spectra.index.values)
re['A']=re['A']+C
re['AC']=re['AC']+C
re_final=re.copy()
error_listen.append(re['error'])
r2_listen.append(1-re['error']/((re['A']-re['A'].mean().mean())**2).sum().sum())
else:
if dump_shapes:
re['c'].to_csv(path_or_buf=filename + '_c')
re['DAC'].to_csv(path_or_buf=filename + '_DAC')
error_listen.append(re['error'])
if not weights is None:
if len(weights)==len(error_listen)-1:
weights=list(weights)
weights.insert(0,1)
elif len(weights)!=len(error_listen):
Ex = ValueError()
Ex.strerror='The number of entries in the list must either be the number of all elements (including \"TA\") or the number of elements in the other projects. In the latter case the element ta gets the weight=1'
raise Ex
combined_error=np.sqrt(((np.array(error_listen)*np.array(weights))**2).mean())
if final:
combined_r2=np.sqrt(((np.array(r2_listen)*np.array(weights))**2).mean())
else:
combined_error=np.sqrt((np.array(error_listen)**2).mean())
if final:
combined_r2=np.sqrt(((np.array(r2_listen))**2).mean())
if final:
re_final['error']=combined_error
re_final['r2']=combined_r2
if dump_paras:
try:
pardf.loc['error','value']=combined_error
except:
pass
try:
pardf.loc['r2','value']=combined_r2
except:
pass
try:
if filename is None:
store_name='minimal_dump_paras.par'
else:
store_name='minimal_dump_paras_%s.par'%filename
min_df=pandas.read_csv(store_name,sep=',',header=None,skiprows=1)
if float(min_df.iloc[-1,1])>float(combined_error):
pardf.to_csv(store_name)
except:
pass
if filename is None:
store_name='dump_paras.par'
else:
store_name='dump_paras_%s.par'%filename
try:
pardf.to_csv(store_name)
except:
print('Saving of %s failed'%store_name)
if not mod in ['paral','exponential','consecutive']:
print(combined_error)
if final:
return re_final
else:
return combined_error
def par_to_pardf(par):
'''function to convert a parameters object into a pretty DataFrame, it expects par to be a lmfit parameters object and loops through the keys'''
out_dicten={}
for key in par.keys():
out_dicten[key]={'value':par[key].value}
if key[0] == 'k':#it is a rate parameter
out_dicten[key]['is_rate']=True
else:
out_dicten[key]['is_rate']=False
out_dicten[key]['min']=par[key].min
out_dicten[key]['max']=par[key].max
out_dicten[key]['vary']=par[key].vary
out_dicten[key]['expr']=par[key].expr
return pandas.DataFrame(out_dicten).T
def pardf_to_par(par_df):
'''converts a dataframe to lmfit object
set(value=None, vary=None, min=None, max=None, expr=None, brute_step=None)'''
par=lmfit.Parameters()
for key in par_df.index.values:
par.add(key, value=par_df.loc[key,'value'], vary=par_df.loc[key,'vary'], min=par_df.loc[key,'min'], max=par_df.loc[key,'max'], expr=par_df.loc[key,'expr'])
return par
def pardf_to_timedf(pardf):
'''inverts all the rates to times in a dataframe'''
timedf=pardf.copy()
if 'upper_limit' in pardf.keys():
for key in ['init_value','value','min','max','lower_limit','upper_limit']:
for row in pardf.index.values:
if timedf.loc[row,'is_rate']:
if key == 'min':key_in='max'
elif key == 'max':key_in='min'
elif key == 'lower_limit':key_in='upper_limit'
elif key == 'upper_limit':key_in='lower_limit'
else:key_in=key
try:
if pardf.loc[row,key] !=0:
timedf.loc[row,key_in]=1/pardf.loc[row,key]
else:
timedf.loc[row,key_in]='inf'
except:
if key == 'init_value':pass#we don't save the init values, so we get an error when converting the saved file
elif pardf.loc[row,key] is None:continue
else:print('conversion of this key failed: %s %s'%(row,key))
else:
for key in ['init_value','value','min','max']:
if key == 'min':key_in='max'
elif key == 'max':key_in='min'
else:key_in=key
try:
timedf.loc[pardf.is_rate,key_in]=pardf.loc[pardf.is_rate,key].apply(lambda x: 1/x if x!=0 else 'inf')
except:
if key == 'init_value':pass#we don't save the init values, so we get an error when converting the saved file
else:print('conversion of this key failed:' + key)
return timedf
class TA(): # object wrapper for the whole
def __init__(self, filename, path = None, sep = "\t", decimal = '.', index_is_energy = False, transpose = False,
sort_indexes = False, divide_times_by = None, shift_times_by = None, external_time = None, external_wave = None,
use_same_name = True, data_type = None , units = None, baseunit = None, ds = None, conversion_function = None):
'''Function that opens and imports data into a TA object
it is designed to open combined files that contain both the wavelength and the time (e.g. SIA files as recorded by Pascher instruments software) or hdf5 projects saved by this software
There are however a lot of additional options to open other ascii type files and adapt their format internally
Attention: times with NaN will be completely removed during the import
Parameters
----------
filename : str
* expects a filename in string form for opening a single file.
* alternatively 'gui' can be set as filename, then a TKinter gui is opened for file selection.
* alternatively 'recent' can be given as keyword. In this case it tries to find a text file named "recent.dat" that should contain the path to the last file opened with the GUI. This file is then opened. If this file is not found the GUI is opened instead
path : str or path object (optional)
if path is a string without the operation system dependent separator, it is treated as a relative path,
e.g. data will look from the working directory in the sub directory data. Otherwise this has to be a
full path in either string or path object form.
sep : str (optional)
is the separator between different numbers; typical are tab (backslash t) (Default), one or
multiple white spaces (backslash s+) or comma ','.
decimal : str (optional)
sets the ascii symbol that is used for the decimal sign. In most countries this is '.'(Default)
but it can be ',' in countries like Sweden or Germany
index_is_energy : bool (optional)
switches if the wavelength is given in nm (Default) or in eV (if True), currently everything
is handled as wavelength in nm internally
data_type : str (optional)
data_type is the string that represents the intensity measurements. Usually this states whether absolute
or differential data is given. This is used for the color intensity in the 2d plots and the y-axis for the 1d plots
units : str (optional)
this is used to identify the units on the energy axis and to label the slices; recognized are 'nm', 'eV' and 'keV',
but if another unit like 'cm^-1' is used it will state energy in 'cm^-1'. Please observe that if you use the index_is_energy
switch the program tries to convert this energy into wavelength.
baseunit : str (optional)
this is used to identify the units on the developing/time axis. This is the name that is attached to the index of the dataframe.
Setting this during import is equivalent to setting ta.baseunit
transpose : bool (optional)
if this switch is False (Default) the wavelengths are the columns and the rows are the times.
sort_indexes : bool (optional)
For False (Default) I assume that the times and energies are already in a rising order.
with this switch, both are sorted again.
divide_times_by : None or float (optional)
here a number can be given that scales the time by an arbitrary factor. This is actually dividing
the times by this value. Alternatively there is the variable self.baseunit. The latter only affects
what is written on the axis, while this value is actually used to scale the times. None (Default)
ignores this
shift_times_by : None, float (optional)
This is a value by which the time axis is shifted during import. This is a useful option if e.g.
the recording software does not compensate for t0 and the data is always shifted.
None (Default) ignores this setting
data_type : str, None
this is the datatype and effectively the unit put on the intensity axis
(Default) 'differential Absorption in $\mathregular{\Delta OD}$'
external_time : None or str (optional)
Here a filename extension (string) can be given that contains the time vector.
The file is assumed to be at the same path as the data and to contain a single
type of separated data without header.
If use_same_name = True (default)
It assumes that this is the ending for the file. The filename itself is taken from the filename.
e.g. if samp1.txt is the filename and external_time='.tid' the program searches
samp1.tid for the times. The transpose setting is applied and sets where the times are
to be inserted (row or column indexes)
If use_same_name = False this should be the file containing the vector for the time (in the same format as the main file)
external_wave : None or str (optional)
Here a filename extension (string) can be given that contains the wavelength vector.
If use_same_name = True (default)
The file is assumed to be at the same path as the data and to contain a single type
of separated data without header. This is the ending for the file. The filename itself
is taken from the filename. e.g. if samp1.txt is the filename and external_wave='.wav'
then the program searches samp1.wav for the wavelength. The transpose setting is applied
and sets where the wavelength are to be inserted (columns or row indexes)
If use_same_name = False
this should be a full filename that contains the vector
use_same_name : bool, optional
this switches whether the external filename is derived from the loaded filename (True, default) or is a separate file
ds: pandas.DataFrame (optional)
feed in an external dataframe instead of opening a file
conversion_function: function(optional)
function that reads the file; it should return a pandas DataFrame with time/frames in rows and wavelength/energy in columns
The function is tested to accept (in that order) a
my_function(filename, external_time, external_wave),
my_function(filename, external_time),
my_function(filename, external_wave),
my_function(filename) and
return: the dataframe ds with the time_axis as rows and spectral axis as columns
if ds.index.name is not empty the "time axis" is set to that name; the spectral axis name is taken from ds.columns.name
the return value is inspected to see whether it contains one, two, or three items
if two are returned then the second must be the name of the intensity axis. This value will then be set to data_type
if three are returned the third is the baseunit (for the time axis); this allows using the automatic naming in ps or nanoseconds
If the values units, data_type or baseunit are (manually) set in the import function the corresponding entries in the
dataframe will be overwritten
shift_times_by and divide_times_by will be applied if not None (useful to adjust for offset before chirp correction)
Returns
-------
A TA object with all parameter initialized
Examples
--------------
Typical usage:
>>> import plot_func as pf #import the module and give it a shorter name
>>> ta=pf.TA('gui') #use a GUI to open
>>> ta=pf.TA('sample_1.SIA') #use a filename in the same folder
>>> ta=pf.TA('sample_1.hdf5',path='Data') #use a filename in the folder 'Data'
Opening a file with an external time vector (of the same name), so it looks for a data
file "file1.txt" and a file with the time information "file1.tid"
>>> ta=pf.TA('file1.txt', external_time = 'tid')
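A minimal sketch of a custom conversion_function (hypothetical; it assumes a plain comma-separated
file with the times in the first column and the wavelengths in the header row)
>>> import pandas
>>> def my_function(filename):
...     ds = pandas.read_csv(filename, sep = ',', index_col = 0) #times in rows, wavelengths in columns
...     ds.index.name = 'Time in ps'
...     ds.columns.name = 'Wavelength in nm'
...     return ds
>>> ta = pf.TA('file1.txt', conversion_function = my_function)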
'''
self.path=check_folder(path=path,current_path=os.getcwd())
self.filename=filename
if ds is not None:
if filename is None:
filename = 'external'
self.filename='external'
if filename == 'gui':
root_window = tkinter.Tk()
root_window.withdraw()
root_window.attributes('-topmost',True)
root_window.after(1000, lambda: root_window.focus_force())
complete_path = filedialog.askopenfilename(initialdir=os.getcwd())
listen=os.path.split(complete_path)
path=os.path.normpath(listen[0])
self.path=path
filename=listen[1]
self.filename=filename
with open('recent.dat','w') as f:
f.write(complete_path)
elif filename == 'recent':
try:
with open('recent.dat','r') as f:
complete_path = f.readline()
listen=os.path.split(complete_path)
path=os.path.normpath(listen[0])
self.path=path
filename=listen[1]
self.filename=filename
except:
root_window = tkinter.Tk()
root_window.withdraw()
root_window.attributes('-topmost',True)
root_window.after(1000, lambda: root_window.focus_force())
complete_path = filedialog.askopenfilename(initialdir=os.getcwd())
listen=os.path.split(complete_path)
path=os.path.normpath(listen[0])
self.path=path
filename=listen[1]
self.filename=filename
with open('recent.dat','w') as f:
f.write(complete_path)
if filename == 'external':#use a provided dataframe (ds) instead
if data_type is not None:
self.data_type = data_type
if units is not None:
self.units = units
try:
if len(ds.columns.name)==0:
ds.columns.name= units
except:
pass
else:
try:
if len(ds.columns.name)!=0:
self.units = ds.columns.name
except:
pass
if baseunit is not None:
self.baseunit = baseunit
try:
if len(ds.index.name)==0:
if (baseunit == 'ps') or (baseunit == 'ns'):
ds.index.name='Time in %s'%baseunit
else:
ds.index.name= baseunit
except:
pass
else:
try:
if len(ds.index.name)!=0:
self.baseunit = ds.index.name
except:
pass
self.ds_ori=ds
self.ds=ds
self.__make_standard_parameter()
		elif ('hdf5' in filename) and (conversion_function is None):#we read in data from a previously saved hdf5 project
self.__read_project(saved_project=check_folder(path=self.path,filename=self.filename))
self.__make_standard_parameter()
self.Cor_Chirp(fitcoeff=self.fitcoeff)
else:#we read in raw data from sia File
if conversion_function is not None:
try:
ret=conversion_function(filename = filename, external_time = external_time, external_wave = external_wave)
except:
try:
ret=conversion_function(filename = filename, external_time = external_time)
except:
try:
ret=conversion_function(filename = filename, external_wave = external_wave)
except:
try:
ret=conversion_function(filename = filename)
except Exception as e:
print(e)
return False
if isinstance(ret,pandas.DataFrame):
##import is what we wanted
ds=ret
elif isinstance(ret,pandas.Series):
ds=ret.to_frame() # a pandas Series is converted into a single-column DataFrame
else:
if len(ret) == 2:
if data_type is None:
ds,data_type=ret
else:
ds,_=ret
elif len(ret) == 3:
if data_type is None:
ds,data_type,baseunit=ret
else:
ds,_,baseunit=ret
else:
print('sorry the return format of the conversion_function was not understood')
print('return: the dataframe ds with the time_axis as rows and spectral axis as columns\n')
print('if ds.index.name is not empty the "time axis" is set to that name; the spectral axis is in ds.columns.name\n')
print('the return value is inspected to see whether it contains one, two, or three items\n')
print('if two are returned then the second must be the name of what the intensity axis is. This value will then be set to data_type\n')
print('if three are returned the third is the baseunit (for the time axis) this allows to use the automatic naming in ps or ns ' )
return False
## see if we have the name a data types in the data
if data_type is not None:
self.data_type = data_type
if units is not None:
self.units = units
try:
if len(ds.columns.name)==0:
ds.columns.name= units
except:
pass
else:
try:
if len(ds.columns.name)!=0:
self.units = ds.columns.name
except:
pass
if baseunit is not None:
self.baseunit = baseunit
try:
if len(ds.index.name)==0:
if (baseunit == 'ps') or (baseunit == 'ns'):
ds.index.name='Time in %s'%baseunit
else:
ds.index.name= baseunit
except:
pass
else:
try:
if len(ds.index.name)!=0:
self.baseunit = ds.index.name
except:
pass
if shift_times_by is not None:
ds.index=ds.index.values+shift_times_by
if divide_times_by is not None:
ds.index=ds.index.values/divide_times_by
self.ds_ori=ds
self.ds=ds
self.__make_standard_parameter()
else:
self.__read_ascii_data(sep = sep, decimal = decimal, index_is_energy = index_is_energy,
transpose = transpose, sort_indexes = sort_indexes,
divide_times_by = divide_times_by, shift_times_by = shift_times_by,
external_time = external_time, external_wave = external_wave,
use_same_name = use_same_name, data_type = data_type, units = units,
baseunit = baseunit)
self.__make_standard_parameter()
def __read_ascii_data(self, sep = "\t", decimal = '.', index_is_energy = False, transpose = False,
sort_indexes = False, divide_times_by = None, shift_times_by = None,
external_time = None, external_wave = None, use_same_name = True, correct_ascii_errors = True,
data_type = None, units = None, baseunit = None):
'''Fancy function that handles the import of pure ascii files.
Parameters
----------
sep : str (optional)
is the separator between different numbers, typical are tab (backslash t) (Default), one or
multiple white spaces 'backslash s+' or comma ','.
decimal : str (optional)
sets the ascii symbol that is used for the decimal sign. In most countries this is '.'(Default)
but it can be ',' in countries like Sweden or Germany
index_is_energy : bool (optional)
switches if the wavelength is given in nm (Default) or in eV (if True), currently everything
is handled as wavelength in nm internally
data_type: str (optional)
data_type is the string that represents the intensity measurements. Usually this states whether absolute
or differential data is shown. This is used for the color intensity in the 2d plots and the y-axis for the 1d plots
units: str (optional)
this is used to identify the units on the energy axis and to label the slices, recognized is 'nm', 'eV' and 'keV'
but if another unit like 'cm^-1' is used it will state energy in 'cm^-1'. Please observe that if you use the index_is_energy
switch the program tries to convert this energy into wavelength.
baseunit: str (optional)
this is used to identify the units on the developing/time axis. This is the name that is attached to the index of the dataframe.
setting this during import is equivalent to ta.baseunit
transpose : bool (optional)
if this switch is False (Default) the wavelengths are the columns and the times are the rows.
sort_indexes : bool (optional)
For False (Default) I assume that the times and energies are already in a rising order.
with this switch, both are sorted again.
divide_times_by : None or float (optional)
here a number can be given that scales the time by an arbitrary factor. This is actually dividing
the times by this value. Alternatively there is the variable self.baseunit. The latter only affects
what is written on the axis, while this value is actually used to scale the times. None (Default)
ignores this
shift_times_by : None, float (optional)
This is a value by which the time axis is shifted during import. This is a useful option if e.g.
the recording software does not compensate for t0 and the data is always shifted.
None (Default) ignores this setting
external_time : None or str (optional)
Here a filename extension (string) can be given that contains the time vector.
The file is assumed to be at the same path as the data and to contain a single
type of separated data without header.
If use_same_name = True (default)
It assumes that this is the ending for the file. The filename itself is taken from the filename.
e.g. if samp1.txt is the filename and external_time='.tid' the program searches
samp1.tid for the times. The transpose setting is applied and sets where the times are
to be inserted (row or column indexes)
If use_same_name = False this should be the file containing the vector for the time (in the same format as the main file)
external_wave : None or str (optional)
Here a filename extension (string) can be given that contains the wavelength vector.
If use_same_name = True (default)
The file is assumed to be at the same path as the data and to contain a single type
of separated data without header. This is the ending for the file. The filename itself
is taken from the filename. e.g. if samp1.txt is the filename and external_wave='.wav'
then the program searches samp1.wav for the wavelength. The transpose setting is applied
and sets where the wavelength are to be inserted (columns or row indexes)
If use_same_name = False
this should be a full filename that contains the vector
use_same_name : bool, optional
this switches whether the external filename is derived from the loaded filename or is a separate file
correct_ascii_errors : bool (optional)
If True (Default) then the code tries to catch some stuff like double minus signs and double dots
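Examples
----------
As this method is private and called through the constructor, a typical way to reach these options
is via the TA object (a hedged sketch, the file name is hypothetical):
>>> ta = pf.TA('data_comma.txt', sep = ';', decimal = ',', index_is_energy = False)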
'''
self.ds_ori=pandas.read_csv(check_folder(path=self.path,filename=self.filename), sep=sep, index_col=0)
if correct_ascii_errors:
if (self.ds_ori.applymap(type) == float).all().all():
pass#all columns were converted to float,nice
else:
print('some values could not be converted to float, trying to clean the data')
try:# try forced conversion
self.ds_ori=self.ds_ori.applymap(lambda x: re.sub('--', '-',x) if type(x) is str else x)
self.ds_ori=self.ds_ori.applymap(lambda x: re.sub(r'\.+', '.',x) if type(x) is str else x)
self.ds_ori=self.ds_ori.astype(np.float64)
except Exception as e:
print('force cleaning went wrong and the file %s can not be read. Error message is:'%self.filename)
print(e)
return False
if external_time is not None:
if use_same_name:
time_file=check_folder(path=self.path,filename=self.filename.split('.')[0]+'.'+external_time)
else:
time_file=check_folder(path=self.path,filename=external_time)
if external_wave is not None:
if use_same_name:
wave_file=check_folder(path=self.path,filename=self.filename.split('.')[0]+'.'+external_wave)
else:
wave_file=check_folder(path=self.path,filename=external_wave)
if external_time is not None:
data_file_name=check_folder(path=self.path,filename=self.filename)
times=pandas.read_csv(time_file,header=None,decimal=decimal).values.ravel()
if transpose:
if external_wave is not None:
self.ds_ori=pandas.read_csv(check_folder(path=self.path,filename=self.filename), sep=sep , decimal=decimal, header=None)
waves=pandas.read_csv(wave_file,header=None,decimal=decimal).values.ravel()
self.ds_ori.index=waves
else:
self.ds_ori=pandas.read_csv(check_folder(path=self.path,filename=self.filename), sep=sep , decimal=decimal, index_col=0,header=None)
self.ds_ori.columns=times
else:
if external_wave is not None:
self.ds_ori=pandas.read_csv(check_folder(path=self.path,filename=self.filename), sep=sep, decimal=decimal,header=None)
waves=pandas.read_csv(wave_file,header=None,decimal=decimal).values.ravel()
self.ds_ori.columns=waves
else:
self.ds_ori=pandas.read_csv(check_folder(path=self.path,filename=self.filename), sep=sep, decimal=decimal)
self.ds_ori.index=times
elif external_wave is not None:
data_file_name=check_folder(path=self.path,filename=self.filename)
waves=pandas.read_csv(wave_file,header=None,decimal=decimal).values.ravel()
if transpose:
self.ds_ori=pandas.read_csv(check_folder(path=self.path,filename=self.filename), sep=sep, decimal=decimal)
self.ds_ori.index=waves
else:
self.ds_ori=pandas.read_csv(check_folder(path=self.path,filename=self.filename), sep=sep, decimal=decimal,index_col=0,header=None)
self.ds_ori.columns=waves
self.ds_ori.columns=self.ds_ori.columns.astype('float')#Make columns indexes numbers
self.ds_ori.index=self.ds_ori.index.astype('float')#Make row indexes numbers
if index_is_energy:
self.ds_ori.index=scipy.constants.h*scipy.constants.c/(self.ds_ori.index*1e-9*scipy.constants.electron_volt)
if transpose:
self.ds_ori=self.ds_ori.T
if sort_indexes:
self.ds_ori.sort_index(axis=0,inplace=True)
self.ds_ori.sort_index(axis=1,inplace=True)
if shift_times_by is not None:
self.ds_ori.index=self.ds_ori.index.values+shift_times_by
if divide_times_by is not None:
self.ds_ori.index=self.ds_ori.index.values/divide_times_by
if data_type is not None:
self.data_type = data_type
if units is not None:
self.units = units
if baseunit is not None:
self.baseunit = baseunit
def __make_standard_parameter(self):
'''function that sets the standard parameter. The function takes no input, but we use this docstring to explain the parameter.
Parameters
-------------
log_scale : bool, optional
If True, the 2D plots (Matrix) are plotted with a pseudo logarithmic intensity scale. (Default) False
This usually does not give good results unless the intensity scale is symmetric
self.cmap : matplotlib.cm
(Default) standard_map - global parameter
cmap is a powerful variable that chooses the colour map applied for all plots. If set to
None (Default) then the self.cmap is used.
As standard I use the color map "jet" from matplotlib. There are a variety of colormaps
available that are very useful. Beside "jet", "viridis" is a good choice as it is well
visible under red-green blindness. Other useful maps are "prism" for high fluctuations
or diverging color maps like "seismic".
See https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html for a comprehensive
selection. In the code the colormaps are imported so if plot_func is imported as pf then
self.cmap=pf.cm.viridis sets viridis as the map to use. Internally the colors are chosen
with the "colm" function. The 2d plots require a continuous color map so if something
else is given 2d plots are shown automatically with "jet". For all of the 1d plots however
I first select a number of colors before each plot. If cmap is a continuous map then these
are sampled evenly over the colourmap. Manual iterables of colours
cmap=[(1,0,0),(0,1,0),(0,0,1),...] are also accepted, as are vectors or dataframes that
contain as rows the colors. There must be of course sufficient colors present for
the numbers of lines that will be plotted. So I recommend to provide at least 10 colours
(e.g. your university colors). Colours are always given as a list or tuple with RGB or RGBA
(with the last A being the Alpha = transparency). All numbers are between 0 and 1.
If a list/vector/DataFrame is given for the colours they will be used in the order provided.
self.lintresh : float
The pseudo logarithmic range "symlog" is used for most time axes. Symlog plots a range around
time zero linearly and beyond this linear threshold 'lintresh' on a logarithmic scale. (Default) 0.3
self.log_fit :
(Default) False\n
Transfer all the time-fitting parameters into log-space before the fit
self.ignore_time_region : None or list (of two floats or of lists)
(Default) None
cut set a time range with a low and high limit from the fits. (Default) None nothing happens
The region will be removed during the fitting process (and will be missing in the fit-result
plots)\n
Usage single region: [lower region limit,upper region limit]\n
use for multiple regions:[[lower limit 1,upper limit 1],[lower limit 2,upper limit 2],...]
self.error_matrix_amplification :
(Default) 10
self.rel_wave : float or list (of floats)
(Default) np.arange(300,1000,100)\n
'rel_wave' and 'width' (in the object called 'wavelength_bin') work together for the creation
of kinetic plots. When plotting kinetic spectra one line will be plotted for each entry
in the list/vector rel_wave. During object generation the vector np.arange(300,1000,100)
is set as standard. Another typical using style would be to define a list of interesting
wavelength at which a kinetic development is to be plotted. At each selected wavelength
the data between wavelength+ta.wavelength_bin and wavelength-ta.wavelength_bin is averaged
for each timepoint returned
self.rel_time : float or list/vector (of floats)
(Default) [0.2,0.3,0.5,1,3,10,30,100,300,1000,3000,9000]\n
For each entry in rel_time a spectrum is plotted. If time_width_percent=0 (Default) the
nearest measured timepoint is chosen. For other values see 'time_width_percent'
self.time_width_percent : float
(Default) 0 "rel_time" and "time_width_percent" work together for
creating spectral plots at specific timepoints. For each entry
in rel_time a spectrum is plotted. If however e.g. time_width_percent=10
the region between the timepoint closest to the 1.1 x timepoint
and 0.9 x timepoint is averaged and shown (and the legend adjusted accordingly).
This is particularly useful for the densely
sampled region close to t=0. Typically for a logarithmically recorded kinetics, the
timepoints at later times will be further apart than 10 percent of the value,
but this allows to elegantly combine values around time=0 for better statistics.
This averaging is only applied for the plotting function and not for the fits.
self.baseunit : str
(Default) 'ps'\n
baseunit is a neat way to change the unit on the time axis of the plots. (Default) 'ps', but they
can be frames or something similar. This is changing only the label of the axis.
During the import there is the option to divide the numbers by a factor.
I have also used frames or fs as units. Important is that all time units will be labeled with
this unit.
self.mod :
(Default) 'exponential'\n
This is the default fitting function, in general this is discussed in the fitting section
self.scattercut : None or iterable (of floats or other iterable, always pairs!)
(Default) None\n
intented to "cut" one or multiple scatter regions. (if (Default) None nothing
happens) If it is set the spectral region between the limits is set to zero.
Usage single region: [lower region limit,upper region limit],
use for multiple regions:[[lower limit 1,upper limit 1],[lower limit 2,upper limit 2],...]
self.bordercut : None or iterable (with two floats)
(Default) None\n
cut spectra at the low and high wavelength limit. (Default) None
uses the limits of measurement
self.time_bin : None or int
(Default) None
is dividing the points on the time-axis in even bins and averages the found values in between.
This is a hard approach that also affects the fits. I do recommend to use this carefully,
it is most useful for modulated data. A better choice for transient absorption that only
affects the kinetics is 'time_width_percent'
self.timelimits : None or list (of 2 floats)
(Default) None\n
cut times at the low and high time limit. (Default) None uses the limits of measurement
Important: If either the background or the chirp is to be fit this must include the
time before zero! Useful: It is useful to work on different regions, starting with
the longest (then use the ta.Backgound function prior to fit) and expand from there
data_type : str
this is the datatype and effectively the unit put on the intensity axis
(Default) 'differential Absorption in $\mathregular{\Delta OD}$'
self.wave_nm_bin : None or float
(Default) None\n
rebins the original data into even intervals. If set to None the original data will be used.
If set to a width (e.g. 2nm), the wavelength axis will be divided into steps of this size
and the mean of all measurements in the interval is taken. The re-binning stops as soon as
the measured stepsize is wider than given here, then the original bins are used.
This function is particularly useful for spectrometer with non-linear dispersion,
like a prism in the infrared.
self.wavelength_bin : float, optional
(Default) 10nm the width used in kinetics, see below
self.intensity_range : None, float or list [of two floats]
(Default) None - intensity_range is a general switch that governs what intensity range the plots show.
For the 1d plots this is the y-axis for the 2d-plots this is the colour scale.
This parameter recognizes three settings. If set to "None" (Default) this uses the minimum and
maximum of the data. A single value like in the example below and the intended use is the symmetric
scale while a list with two entries gives an asymmetric scale e.g.
intensity_range=3e-3 is converted into intensity_range=[-3e-3,3e-3]
self.ds_ori.columns.name : str, optional
(Default) 'Wavelength in nm'\n
This is the general energy axis. Here we define it with the unit. Change this to energy for use in e.g. x-ray science
self.ds_ori.index.name : str, optional
Standard 'Time in %s' % self.baseunit
self.data_type: str (optional)
self.data_type='diff. Absorption in $\mathregular{\Delta OD}$'
self.fitcoeff : list (5 floats)
chirp correction polynom
self.chirp_file : str
if there is a file with the right name write it here, otherwise None
self.figure_path : str
Path for saving figures, if set
self.save_figures_to_folder : bool
if True all figures are automatically saved when any plotfunction is called
Examples
-----------
>>> ta.bordercut=[350,1200] #remove all data outside this limit
>>> ta.scattercut=[522,605] #set data inside this limit to zero
>>> ta.timelimits=[0.2,5000] #remove all data outside this limit
>>> ta.wave_nm_bin=5 #rebin the data to this width
>>> ta.intensity_range=3e-3 #equivalent to [-3e-3,3e-3]
>>> ta.intensity_range=[-1e-3,3e-3] #intensity that is plotted in 2d plot and y-axis in 1d plots
>>> ta.cmap=matplotlib.cm.prism #choose different colour map
>>> ta.ignore_time_region=[-0.1,0.1] #ignore -0.1ps to 0.1ps
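A few more settings of the parameters documented above (the values are arbitrary examples):
>>> ta.rel_wave=[390,420,530,700]   #wavelengths at which kinetics are plotted
>>> ta.wavelength_bin=20            #averaging width used for these kinetics
>>> ta.rel_time=[0.3,1,10,100]      #times at which spectra are plotted
>>> ta.time_width_percent=10        #average +-10% around each of these times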
'''
self.log_scale = False if not hasattr(self, 'log_scale') else self.log_scale
self.cmap = standard_map if not hasattr(self, 'cmap') else self.cmap
self.lintresh = 0.3 if not hasattr(self, 'lintresh') else self.lintresh
self.log_fit = False if not hasattr(self, 'log_fit') else self.log_fit
self.ignore_time_region = None if not hasattr(self, 'ignore_time_region') else self.ignore_time_region
self.error_matrix_amplification = 10 if not hasattr(self, 'error_matrix_amplification') else self.error_matrix_amplification
self.rel_wave = np.arange(300,1000,100) if not hasattr(self, 'rel_wave') else self.rel_wave
self.rel_time = [0.2,0.3,0.5,1,3,10,30,100,300,1000,3000,9000] if not hasattr(self, 'rel_time') else self.rel_time
self.time_width_percent = 0 if not hasattr(self, 'time_width_percent') else self.time_width_percent
self.baseunit = 'ps' if not hasattr(self, 'baseunit') else self.baseunit
self.mod = 'exponential' if not hasattr(self, 'mod') else self.mod
self.scattercut = None if not hasattr(self, 'scattercut') else self.scattercut
self.bordercut = None if not hasattr(self, 'bordercut') else self.bordercut
self.time_bin = None if not hasattr(self, 'time_bin') else self.time_bin
self.timelimits = None if not hasattr(self, 'timelimits') else self.timelimits
self.wave_nm_bin = None if not hasattr(self, 'wave_nm_bin') else self.wave_nm_bin
self.wavelength_bin = 10 if not hasattr(self, 'wavelength_bin') else self.wavelength_bin
self.save_figures_to_folder = False if not hasattr(self, 'save_figures_to_folder') else self.save_figures_to_folder
self.intensity_range = None if not hasattr(self, 'intensity_range') else self.intensity_range
self.ds_ori.index.name = 'Time in %s' % self.baseunit if not hasattr(self, 'ds_ori.index.name') else self.ds_ori.index.name
self.equal_energy_bin = None if not hasattr(self, 'equal_energy_bin') else self.equal_energy_bin
self.units='nm' if not hasattr(self, 'units') else self.units
if self.units == 'nm':
self.ds_ori.columns.name = 'Wavelength in %s'%self.units if not hasattr(self, 'ds_ori.columns.name') else self.ds_ori.columns.name
else:#'eV', 'keV' and any other unit are labeled as energy
self.ds_ori.columns.name = 'Energy in %s'%self.units if not hasattr(self, 'ds_ori.columns.name') else self.ds_ori.columns.name
self.data_type= 'diff. Absorption in $\mathregular{\Delta OD}$' if not hasattr(self, 'data_type') else self.data_type
try:#self.fitcoeff
self.fitcoeff
if len(list(self.fitcoeff))<5:raise
except:
self.fitcoeff=[0,0,0,0,0] #default: no chirp correction polynomial set yet
try:#self.chirp_file
self.chirp_file
except:
if os.path.isfile(check_folder(path=self.path,filename=self.filename.split('.')[0] + '_chirp.dat')):
self.chirp_file=self.filename.split('.')[0] + '_chirp.dat'
else:
self.chirp_file=False
try:#self.figure_path
self.figure_path
except:
if self.save_figures_to_folder:
self.figure_path=check_folder(path="result_figures",current_path=self.path)
else:
self.figure_path=None
self.ds=self.ds_ori.copy()
def Filter_data(self, ds=None, cut_bad_times = True, replace_bad_values = None, value = 20, uppervalue = None, lowervalue = None, upper_matrix = None, lower_matrix = None):
'''Filters the data by applying hard replacements. If both replace_bad_values and
cut_bad_times are False or None, the offending values are replaced by NaN
Parameters
------------
ds : pandas Dataframe, optional
if this is None (default) then the self.ds and self.ds_ori will be filtered
value : float, optional
all values above this (absolute) value are considered to be corrupted. (Default 20) as classically the setup
reports optical density, an OD of 20 would be far above the typically expected
OD 1e-3. Pascher instrument software uses a value of 21 to indicate an error.
uppervalue : float, optional
all values above this number are considered to be corrupted. (Default 20) as classically the setup
reports optical density, an OD of 20 would be far above the typically expected
OD 1e-3. Pascher instrument software uses a value of 21 to indicate an error.
lowervalue : float, optional
all values below this number are considered to be corrupted. (Default -20) as classically the setup
reports optical density, an OD of -20 would be far below the typically expected
OD 1e-3. Pascher instrument software uses a value of 21 to indicate an error.
replace_bad_values : None or float, optional
values above the threshold are replaced with this value. Ignored if None (Default)
cut_bad_times : bool, optional
True (Default) removes the whole time where this is true
upper_matrix : Pandas DataFrame, optional
all values above this threshold will be set to N/A or replaced by the value in replace_bad_values
lower_matrix : Pandas DataFrame, optional
all values below this threshold will be set to N/A or replaced by the value in replace_bad_values
the value is the upper bound, everything
above will be filtered. Standard is to drop the rows (=times) where something went wrong
Examples
---------
typical usage
>>> import plot_func as pf
>>> ta=pf.TA('testfile.SIA')
>>> ta.Filter_data()
>>> ta.Filter_data(value=1) #to filter times with at least one point with OD 1
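further hedged examples of the options described above (the threshold values are arbitrary):
>>> ta.Filter_data(uppervalue = 1, lowervalue = -0.5)              #asymmetric thresholds
>>> ta.Filter_data(cut_bad_times = False, replace_bad_values = 0)  #keep all times but set outliers to 0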
'''
if uppervalue is None: uppervalue = np.abs(value)
if lowervalue is None: lowervalue = -np.abs(value)
if replace_bad_values is not None:
cut_bad_times=False
if ds is None:
filtering=[self.ds,self.ds_ori]
else:
filtering=[ds]
for dataset in filtering:
if any([self.ignore_time_region is not None, self.scattercut is not None, self.bordercut is not None, self.timelimits is not None]):
dataset=sub_ds(dataset, ignore_time_region = self.ignore_time_region, scattercut = self.scattercut, bordercut = self.bordercut, timelimits = self.timelimits)
if cut_bad_times: #timepoint filter, delete the timepoints where value is stupid
matrix_size=len(dataset.index.values)
if upper_matrix is None:
damaged_times=dataset[np.any(dataset.values>uppervalue,axis=1)].index
else:
damaged_times=dataset[np.any(dataset.values>upper_matrix,axis=1)].index
dataset.drop(damaged_times,inplace = True)
if lower_matrix is None:
damaged_times=dataset[np.any(dataset.values<lowervalue,axis=1)].index
else:
damaged_times=dataset[np.any(dataset.values<lower_matrix,axis=1)].index
dataset.drop(damaged_times,inplace = True)
if len(dataset.index.values)<matrix_size*0.8:
print('attention, more than 20% of the data was removed by this filter.')
print('Please check if the spectral borders contain regions without light (and high noise)')
print('Setting a bordercut and scattercut before the filtering might be useful')
else:
if replace_bad_values is None: #individual data filter
replace_bad_values=np.nan
if upper_matrix is None:
dataset.values[dataset.values>uppervalue]=replace_bad_values
else:
dataset.values[dataset.values>upper_matrix]=replace_bad_values
if lower_matrix is None:
dataset.values[dataset.values<lowervalue]=replace_bad_values
else:
dataset.values[dataset.values<lower_matrix]=replace_bad_values
if np.isnan(replace_bad_values): # comparing with == np.nan is always False
if dataset.isna().sum().sum()>0.2 * dataset.notna().sum().sum():
print('attention, more than 20% of the data was removed by this filter.')
print('Please check if the spectral borders contain regions without light (and high noise)')
print('Setting a bordercut and scattercut before the filtering might be useful')
else:
if dataset[dataset==replace_bad_values].notna().sum().sum()> 0.2* dataset[dataset!=replace_bad_values].notna().sum().sum():
print('attention, more than 20% of the data was removed by this filter.')
print('Please check if the spectral borders contain regions without light (and high noise)')
print('Setting a bordercut and scattercut before the filtering might be useful')
if ds is not None:return filtering[0]
def Background(self, lowlimit=None,uplimit=-1, use_median=False, ds=None, correction=None):
'''This is the background correction. In general it averages, for each measured
wavelength, the values from 'lowlimit' to 'uplimit' and
subtracts them from the data. It runs on the object (global) or, if
a specific ds is given, locally.
The low and uplimit can be set anywhere to subtract any background.
It is important to note that many problems during measurements might
be visible in the data before time zero. So I recommend to first
plot without background correction and only after this inspection
apply the background correction.
The fit function has its own way to calculate and apply a background
that could be used instead (but it makes the fit less stable)
Parameters
------------
lowlimit : None or float, optional
this is the lower limit from which the average (or median) is taken
(Default) is None, in which case the lower limit of the data is used.
uplimit : None or float, optional
this is the upper limit until which the average (or median) is taken
(Default) is -1 (usually ps).
use_median : bool, optional
the Median is a more outlier resistant metric in comparison to
the Mean (Average). However the values are not quite as close
to the distribution center in case of very few values. False
(Default) means the Mean is used
ds : None or DataFrame, optional
if None (Default) the internal Dataframe self.ds is used,
otherwise the pandas DataFrame ds is corrected and returned
correction : None or DataFrame, optional
this is the correction applied. It must be a DataFrame with
the same numbers of columns (spectral points) as the used ds
Examples
--------
if the object self has the name "ta"
typical usage:
>>> ta.Background()
specify the integrated area from -inf (Default) up to -0.5ps and use the Median for computation
>>> ta.Background(uplimit = -0.5, use_median = True)
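limit the averaged window (hedged example values)
>>> ta.Background(lowlimit = -5, uplimit = -0.5)
apply the correction stored by a previous global run to an external dataframe (hedged sketch;
'other_ds' is a hypothetical DataFrame with the same columns as ta.ds)
>>> ds_corrected = ta.Background(ds = other_ds, correction = ta.background_par[3])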
'''
if ds is None:
run_global=True
ds=self.ds
else:
run_global=False
if correction is None:raise ValueError('A correction must be given when an external ds is provided, too slow otherwise')
if (lowlimit is None) and (correction is None):
if use_median:
correction=ds[:uplimit].median(axis=0)
else:
correction=ds[:uplimit].mean(axis=0)
elif (lowlimit is not None) and (correction is None):
if use_median:
correction=ds[lowlimit:uplimit].median(axis=0)
else:
correction=ds[lowlimit:uplimit].mean(axis=0)
if run_global:
self.ds=ds-correction
self.background_par=[lowlimit,uplimit,use_median,correction]
else:
return ds-correction
def Man_Chirp(self,shown_window=[-1,1],path=None,max_points=40,cmap=cm.prism,ds=None):
'''Manual triggering of Fix_Chirp. Usually used when Cor_Chirp has run already.
Alternatively delete the chirp file. This function opens a plot in which the user manually selects a number of points
These points will then be interpolated with a 4th order polynomial
The user can then select a new t=0 point.
The first option allows to fine select an intensity setting for this chirp correction.
However sometimes spikes make these things difficult. In this case set a guessed intensity with self.intensity_range=1e-3
Parameters
-------------
path : str or path object (optional)
if path is a string without the operating system dependent separator, it is treated as a relative path,
e.g. data will look from the working directory in the sub directory data. Otherwise this has to be a
full path in either string or path object form.
shown_window : list (with two floats), optional
Defines the window that is shown during chirp correction. If the t=0 is not visible, adjust this parameter
to suit the experiment. If problems arise, I recommend to use Plot_Raw to check where t=0 is located
max_points : int, optional
Default = 40 max numbers of points to use in Gui selection. Useful option in case no middle mouse button
is available. (e.g. touchpad)
cmap : matplotlib colourmap, optional
Colourmap to be used for the chirp correction. While there is a large selection here I recommend to choose
a different map than is used for the normal 2d plotting.\n
cm.prism (Default) has proven to be very useful
ds: pandas dataframe,optional
this allows handing in an external ds. If this is done then the fitcoeff saved on disk are only the new ones and the
function returns the new fitcoeff and the combined fitcoeff; self also has a new variable called self.combined_fitcoeff
the original file on disk and self.fitcoeff are NOT overwritten (they remain the old ones)
the self.ds is the NEW one (with the correction applied)
to reverse simply run Cor_Chirp()
to permanently apply the change replace self.fitcoeff with self.combined_fitcoeff and rename the file with 'filename_second_chirp' to filename_chirp
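Examples
--------------
Typical usage (a hedged sketch) after an unsatisfying automatic correction:
>>> ta.Man_Chirp()                                      #select new points by hand, then choose a new t=0
>>> ta.Man_Chirp(shown_window=[-2,2], max_points=20)    #wider time window, fewer selection points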
'''
if ds is None:
ds=self.ds_ori
original=True
else:
original=False
if original:
temp_ds = Fix_Chirp(ds, cmap = cmap, save_file = None, intensity_range = self.intensity_range,
wave_nm_bin = 10, shown_window = shown_window, filename = self.filename,
scattercut = self.scattercut, bordercut = self.bordercut,
path = check_folder(path = path, current_path = self.path), max_points = max_points)
else:
temp_ds = Fix_Chirp(ds, cmap = cmap, save_file = None, intensity_range = self.intensity_range,
wave_nm_bin = 10, shown_window = shown_window, filename = self.filename+'_second_chirp',
scattercut = self.scattercut, bordercut = self.bordercut,
path = check_folder(path = path, current_path = self.path), max_points = max_points)
if isinstance(temp_ds,pandas.DataFrame):
self.ds=temp_ds
self.chirp_file=self.filename.split('.')[0] + '_chirp.dat'
if original:#we have run from scratch
self.Cor_Chirp(path=path)
else:
print('you provided a separate ds file. returned are the new fitcoeff and the combined fitcoeff, ta also has a new variable called ta.combined_fitcoeff')
save_file=check_folder(path=path,current_path = self.path, filename=self.filename+'_second_chirp')
with open(save_file,'r') as f:
new_fitcoeff=f.readline()
new_fitcoeff=np.array(new_fitcoeff.split(','),dtype='float')
self.combined_fitcoeff=self.fitcoeff+new_fitcoeff
return new_fitcoeff,self.combined_fitcoeff
else:
raise Warning('Man Chirp interrupted')
def Cor_Chirp(self, chirp_file = None, path = None, shown_window = [-1, 1], fitcoeff = None, max_points = 40, cmap = cm.prism):
'''*Cor_Chirp* is a powerful Function to correct for a different arrival times of
different wavelengths (sometimes called chirp).
In general if a file is opened for the first time this function is opening
a plot and allows the user to select a number of points, which are then
approximated with a 4th order polynomial and finally to select a point
that is declared as time zero. The observed window as well as the intensities
and the colour map can be chosen to enable a good correction. Here a fast
iterating colour scheme such as "prism" is often a good choice. In all of the
selections a left click selects, a right click removes the last point and
a middle click (sometimes achieved by clicking left and right together)
finishes the selection. If no middle click exists, the process
automatically ends after max_points (40 preset).
The first option allows to fine select an intensity setting for this chirp correction.
However sometimes spikes make these things difficult.
In this case set a guessed intensity with self.intensity_range=1e-3\n
Note that scattercut, bordercut and intensity_range can be used
After the first run the polynom is stored in self.fitcoeff, a new matrix
calculated from self.ds_ori that is stored as self.ds and a file stored in the
same location as the original data. The second time the function *Cor_Chirp* is
run the function will find the file and apply the chirp correction automatically.
If one does want to re-run the chirp correction the function *Man_Chirp* does
not look for this file, but creates a new file after finishing.
Alternatively the polynom or a filename can be given that load a chirp correction
(e.g. from a different run with the same sample).
The function *Cor_Chirp* selects in the order:
1. "fitcoeff"
2. "other files"
3. "stored_file"
4. call Man_Chirp (clicking by hand)
Parameters
-------------
chirp_file : None or str, optional
If a raw file was read(e.g. "data.SIA") and the chirp correction was
completed, a file with the attached word "chirp" is created and
stored in the same location. ("data_chirp.dat") This file contains
the 5 values of the chirp correction. By selecting such a file
(e.g. from another raw data) a specific chirp is applied. If a
specific name is given with **chirp_file** (and optional **path**)
then this file is used.\n
GUI\n
The word *'gui'* can be used instead of a filename to open a gui that
allows the selection of a chirp file
path : str or path object (optional)
if path is a string without the operating system dependent separator, it is treated as a relative path,
e.g. data will look from the working directory in the sub directory data. Otherwise this has to be a
full path in either string or path object form.
shown_window : list (with two floats), optional
Defines the window that is shown during chirp correction. If the t=0 is not visible, adjust this parameter
to suit the experiment. If problems arise, I recommend to use Plot_Raw to check where t=0 is located
fitcoeff : list or vector (5 floats), optional
One can give a vector/list with 5 numbers representing the parameter
of a 4th order polynomial (in the order
:math:`(a4*x^4 + a3*x^3+a2*x^2+a1*x1+a0)`. The chirp parameter are
stored in ta.fitcoeff and can thus be used in other TA objects. This
vector is also stored with the file and automatically applied during
re-loading of a hdf5-object
max_points : int, optional
Default = 40 max numbers of points to use in Gui selection. Useful option in case no middle mouse button
is available. (e.g. touchpad)
cmap : matplotlib colourmap, optional
Colourmap to be used for the chirp correction. While there is a large selection here I recommend to choose
a different map than is used for the normal 2d plotting.\n
cm.prism (Default) has proven to be very useful
Examples
----------
In most cases:
>>> import plot_func as pf
>>> ta = pf.TA('test1.SIA') #open the original project,
>>> ta.Cor_Chirp()
Selecting a specific correction
>>> ta.Cor_Chirp('gui')
>>> ta.Cor_Chirp(chirp_file = 'older_data_chirp.dat')
>>> #use the coefficients from a different project
>>> ta.Cor_Chirp(fitcoeff = ta_old.fitcoeff) #use the coefficients from a different project
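Disabling the correction explicitly by handing in all-zero coefficients (a hedged sketch based on the
handling of zero coefficients described above; the data is then left unchanged)
>>> ta.Cor_Chirp(fitcoeff = [0,0,0,0,0]) #no chirp correction is applied, ta.ds equals ta.ds_ori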
'''
if chirp_file is None:
chirp_file=self.chirp_file
elif 'gui' in chirp_file:
root_window = tkinter.Tk()
root_window.withdraw()
root_window.attributes('-topmost',True)
root_window.after(1000, lambda: root_window.focus_force())
complete_path = filedialog.askopenfilename(initialdir=os.getcwd())
listen=os.path.split(complete_path)
path=os.path.normpath(listen[0])
chirp_file=listen[1]
path=check_folder(path,self.path)
if fitcoeff is not None:#we use a stored project
try:
if len(fitcoeff)==5 or len(fitcoeff)==6:#we provide a valid list/vector
if all(elem == 0 for elem in fitcoeff):
self.ds=self.ds_ori
print('all chirp coefficients are zero so no chirp correction applied')
else:
self.ds=Fix_Chirp(self.ds_ori,fitcoeff=fitcoeff)
self.fitcoeff=fitcoeff #we came to here so fitcoeff must be right
else:
raise
except:
self.ds=self.ds_ori
print('something went wrong with the provided fitcoeff. This should be either a list/array with 5-6 parameter or the object should contain the parameter')
print('fitcoeff is currently: %s'%str(fitcoeff))
else:
try:
self.ds=Fix_Chirp(self.ds_ori,cmap=cmap,save_file=check_folder(path=path,filename=chirp_file),scattercut=self.scattercut,bordercut=self.bordercut,intensity_range=self.intensity_range,wave_nm_bin=10,shown_window=shown_window,fitcoeff=fitcoeff,max_points=max_points)
with open(check_folder(path=path,filename=chirp_file),'r') as f:
self.fitcoeff=[float(a) for a in f.readline().split(',')]
except:
print(check_folder(path=self.path,filename=self.filename.split('.')[0] + '_chirp.dat'))
if os.path.isfile(check_folder(path=self.path,filename=self.filename.split('.')[0] + '_chirp.dat')):
print('something is wrong, try deleting the old chirp file')
raise
else:
print('No old chirp file')
self.Man_Chirp(path=path,cmap=cmap,shown_window=shown_window,max_points=max_points)
chirp_file=self.chirp_file
with open(check_folder(path=path,filename=chirp_file),'r') as f:
self.fitcoeff=[float(a) for a in f.readline().split(',')]
self.ds.columns.name=self.ds_ori.columns.name
self.ds.index.name=self.ds_ori.index.name
def Plot_Interactive(self, fitted = False, ds = None, cmap = None, plot_on_move = False):
'''Interactive plotting function. It plots the matrix in the middle and two slices that are selected by the mouse (click)
Parameters
---------------
fitted : bool, optional
this switch decides if the fitted or the RAW data is plotted with this widget to
inspect the data. If fitted is False (Default) then the raw data and an interpolation
is used to plot.
cmap : None or matplotlib color map, optional
is a powerful variable that chooses the colour map applied for all plots. If set to
None (Default) then the self.cmap is used.
As standard I use the color map "jet" from matplotlib. There are a variety of colormaps
available that are very useful. Beside "jet", "viridis" is a good choice as it is well
visible under red-green blindness. Other useful maps are "prism" for high fluctuations
or diverging color maps like "seismic".
See https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html for a comprehensive
selection. In the code the colormaps are imported so if plot_func is imported as pf then
self.cmap=pf.cm.viridis sets viridis as the map to use. Internally the colors are chosen
with the "colm" function. The 2d plots require a continuous color map so if something
else is given 2d plots are shown automatically with "jet". For all of the 1d plots however
I first select a number of colors before each plot. If cmap is a continuous map then these
are sampled evenly over the colourmap. Manual iterables of colours
cmap=[(1,0,0),(0,1,0),(0,0,1),...] are also accepted, as are vectors or dataframes that
contain as rows the colors. There must be of course sufficient colors present for
the numbers of lines that will be plotted. So I recommend to provide at least 10 colours
(e.g. your university colors). Colours are always given as a list or tuple with RGB or RGBA
(with the last A being the Alpha = transparency). All numbers are between 0 and 1.
If a list/vector/DataFrame is given for the colours they will be used in the order provided.
ds : DataFrame, optional
if None (Default), the program first tests self.ds and if this is not there then self.ds_ori.
This option was introduced to allow plotting of other matrixes with the same parameter
plot_on_move : bool, optional
Default: False plots the slices after a click; if True the plot constantly reslices on mouse move and a click
prints the current position.
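Examples
---------------
Typical usage (a hedged sketch); keep the returned handles so the widget stays responsive:
>>> eve, cursor = ta.Plot_Interactive()                     #click into the matrix to reslice
>>> eve, cursor = ta.Plot_Interactive(plot_on_move = True)  #reslice on mouse move, a click prints the position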
'''
from matplotlib.widgets import Cursor
if cmap is None:cmap=self.cmap
if ds is None:
if not fitted:
if self.ds is None:
ds=self.ds_ori.copy()
else:
ds=self.ds.copy()
else:
ds=self.re['A']
modelled=self.re['AC']
intensity_range=self.intensity_range
if intensity_range is None:
try:
maxim=max([abs(ds.values.min()),abs(ds.values.max())])
intensity_range=[-maxim,maxim]
except:
intensity_range=[-1e-2,1e-2]
else:
if not hasattr(intensity_range,'__iter__'):#lets have an lazy option
intensity_range=[-intensity_range,intensity_range]
class MouseMove:
# initialization
def __init__(self, ds, cmap, intensity_range, log_scale, baseunit,
timelimits, scattercut, bordercut, wave_nm_bin, equal_energy_bin, ignore_time_region,
time_bin, lintresh, data_type, width, time_width_percent):
fig = plt.figure(tight_layout=True,figsize=(14,8))
gs = GridSpec(5, 4)
self.ax= fig.add_subplot(gs[1:, :3])
self.ds=ds
self.cmap=cmap
self.intensity_range=intensity_range
self.log_scale=log_scale
self.baseunit=baseunit
self.timelimits=timelimits
self.scattercut=scattercut
self.bordercut=bordercut
self.wave_nm_bin=wave_nm_bin
self.equal_energy_bin=equal_energy_bin
self.ignore_time_region=ignore_time_region
self.time_bin=time_bin
self.data_type=data_type
self.width=width
self.lintresh=lintresh
self.time_width_percent=time_width_percent
self.ax = plot2d(ds=ds, ax=self.ax, cmap=cmap, intensity_range=self.intensity_range,
log_scale=self.log_scale, baseunit=self.baseunit, timelimits=self.timelimits,
scattercut=self.scattercut, bordercut=self.bordercut, wave_nm_bin=self.wave_nm_bin, equal_energy_bin=self.equal_energy_bin,
ignore_time_region=self.ignore_time_region, time_bin=self.time_bin,
lintresh=self.lintresh, data_type = self.data_type, use_colorbar = False)
self.ax_time= fig.add_subplot(gs[0, :3],sharex=self.ax)
self.ax_kinetic= fig.add_subplot(gs[1:, -1],sharey=self.ax)
plt.subplots_adjust(wspace=0,hspace=0)
if plot_on_move:
fig.canvas.mpl_connect('motion_notify_event', self.move)
fig.canvas.mpl_connect('button_press_event', self.click)
else:
fig.canvas.mpl_connect('button_press_event', self.move)
def click(self, event):
x, y = event.xdata, event.ydata
if self.equal_energy_bin is not None:
x=scipy.constants.h*scipy.constants.c/(x*1e-9*scipy.constants.electron_volt)
print('x=%g, y=%g\n'%(x,y))
def move(self, event):
x, y = event.xdata, event.ydata
if self.equal_energy_bin is not None:
x=scipy.constants.h*scipy.constants.c/(x*1e-9*scipy.constants.electron_volt)
try:
self.ax_time.cla()
except:
pass
if not fitted:
ds_temp1 = sub_ds(ds = Frame_golay(ds,5,3), times = y, time_width_percent = self.time_width_percent,
scattercut = self.scattercut, drop_scatter=True, bordercut = self.bordercut,
ignore_time_region = self.ignore_time_region, wave_nm_bin = self.wave_nm_bin, equal_energy_bin=self.equal_energy_bin,
wavelength_bin = self.width)
ds_temp1.plot(ax=self.ax_time,style='-',color='red')
else:
ds_temp1 = sub_ds(ds = modelled, times = y, time_width_percent = self.time_width_percent,
scattercut = self.scattercut, drop_scatter=True, bordercut = self.bordercut,
ignore_time_region = self.ignore_time_region, wave_nm_bin = self.wave_nm_bin, equal_energy_bin=self.equal_energy_bin,
wavelength_bin = self.width)
ds_temp1.plot(ax=self.ax_time,style='-',color='red')
ds_temp = sub_ds(ds = ds, times = y, time_width_percent = self.time_width_percent,
scattercut = self.scattercut, drop_scatter=True, bordercut = self.bordercut,
ignore_time_region = self.ignore_time_region, wave_nm_bin = self.wave_nm_bin, equal_energy_bin=self.equal_energy_bin,
wavelength_bin = self.width)
ds_temp.plot(ax=self.ax_time,style='*',color='black')
self.ax_time.plot(self.ax_time.get_xlim(),[0,0],'gray')
if not fitted:
self.ax_time.legend(['%.3g %s smoothed'%(y,self.baseunit)])
else:
self.ax_time.legend(['%.3g %s fitted'%(y,self.baseunit)])
self.ax_time.set_yticks(self.ax_time.get_ylim())
self.ax_time.set_yticklabels(['%.1e'%f for f in self.ax_time.get_ylim()])
for i in range(3):
try:
self.ax_kinetic.lines.pop(0)
except:
pass
if self.width is None:
self.width = 10
if not fitted:
ds_temp1 = sub_ds(ds = Frame_golay(ds), wavelength = x, scattercut = self.scattercut, drop_scatter=True,
bordercut = self.bordercut, ignore_time_region = self.ignore_time_region,
wave_nm_bin = self.wave_nm_bin,wavelength_bin = self.width)
self.ax_kinetic.plot(ds_temp1.values,ds_temp1.index.values,'-',label='%.0f smoothed'%x,color='red')
else:
ds_temp1 = sub_ds(ds = modelled, wavelength = x, scattercut = self.scattercut, drop_scatter=True,
bordercut = self.bordercut, ignore_time_region = self.ignore_time_region,
wave_nm_bin = self.wave_nm_bin, wavelength_bin = self.width)
self.ax_kinetic.plot(ds_temp1.values,ds_temp1.index.values,'-',label='%.0f fitted'%x,color='red')
ds_temp = sub_ds(ds = ds, wavelength = x, scattercut = self.scattercut, drop_scatter=True,
bordercut = self.bordercut, ignore_time_region = self.ignore_time_region,
wave_nm_bin = self.wave_nm_bin, wavelength_bin = self.width)
self.ax_kinetic.set_xlim(min([0,min(ds_temp.values)]),max([max(ds_temp.values),0]))
self.ax_kinetic.plot(ds_temp.values,ds_temp.index.values,'*',label='%.0f'%x, color='black')
self.ax_kinetic.plot([0,0],self.ax_kinetic.get_ylim(),'gray')
self.ax_kinetic.legend(['%.0f'%x])
self.ax_kinetic.set_xticks(self.ax_kinetic.get_xlim())
self.ax_kinetic.set_xticklabels(['%.1e'%f for f in self.ax_kinetic.get_xlim()])
self.ax_kinetic.set_yticklabels(self.ax.get_yticklabels())
plt.subplots_adjust(wspace=0,hspace=0)
eve=MouseMove(ds, cmap, self.intensity_range, self.log_scale, self.baseunit, self.timelimits,
self.scattercut, self.bordercut, self.wave_nm_bin, self.equal_energy_bin, self.ignore_time_region,
self.time_bin, self.lintresh, self.data_type, self.wavelength_bin, self.time_width_percent)
cursor = Cursor(eve.ax, useblit=True, color='red', linewidth=2)
return eve,cursor
def Plot_RAW(self, plotting = range(4), title = None, scale_type = 'symlog', times = None,
cmap = None, filename = None, path = "result_figures", savetype = 'png' , print_click_position = False,
plot_second_as_energy = True, ds = None):
'''This is a wrapper function that triggers the plotting of various RAW (non fitted) plots.
The shaping parameter are taken from the object and should be defined before.
The parameter in this plot call are to control the general look and features of the plot.
Which plots are printed is defined by the first command (plotting)
The plots are generated on the fly using self.ds and all the shaping parameter
In all plots the RAW data is plotted as dots and interpolated with lines
(using Savitzky-Golay window=5, order=3 interpolation). As defined by the internal parameters
at selected time-points and the kinetics for selected wavelength are shaped by the
object parameter. The SVD is performed using the same shaping parameter and is commonly
used as an orientation for the number of components in the data.
Everything is handed over to 'plot_raw' function that can be used for extended RAW plotting.
Parameters
---------------
plotting : int or iterable (of integers), optional
This parameter determines which figures are plotted
the figures can be called separately with plotting = 1
or with a list of plots (Default) e.g. plotting=range(4) calls plots 0,1,2,3.
The plots have the following numbers:
0. Matrix
1. Kinetics
2. Spectra
3. SVD
The plotting takes all parameter from the "ta" object.
title : None or str
title to be used on top of each plot
The (Default) None triggers self.filename to be used. Setting a specific title as string will
be used in all plots. To remove the title altogether set an empty string with this command title="" .
scale_type : None or str
is a general setting that can influences what time axis will be used for the plots.
"symlog" (linear around zero and logarithmic otherwise) "lin" and "log" are valid options.
times : int
the number of components to be used in the SVD. (Default) is 6.
cmap : None or matplotlib color map, optional
is a powerful variable that chooses the colour map applied for all plots. If set to
None (Default) then the self.cmap is used.
As standard I use the color map "jet" from matplotlib. There are a variety of colormaps
available that are very usefull. Beside "jet", "viridis" is a good choice as it is well
visible under red-green blindness. Other useful maps are "prism" for high fluctuations
or diverging color maps like "seismic".
See https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html for a comprehensive
selection. In the code the colormaps are imported so if plot_func is imported as pf then
self.cmap=pf.cm.viridis sets viridis as the map to use. Internally the colors are chosen
with the "colm" function. The 2d plots require a continuous color map so if something
else is given 2d plots are shown automatically with "jet". For all of the 1d plots however
I first select a number of colors before each plot. If cmap is a continuous map then these
are sampled evenly over the colourmap. Manual iterables of colours
cmap=[(1,0,0),(0,1,0),(0,0,1),...] are also accepted, as are vectors or dataframes that
contain as rows the colors. There must be of course sufficient colors present for
the numbers of lines that will be plotted. So I recommend to provide at least 10 colours
(e.g.~your university colors). colours are always given as a, list or tuple with RGA or RGBA
(with the last A beeing the Alpha=transparency. All numbers are between 0 and 1.
If a list/vector/DataFrame is given for the colours they will be used in the order provided.
filename : str, optional
offers to replace the base-name used for all plots (to e.g. specify what sample was used).
if (Default) None is used, the self.filename is used as a base name. The filename plays only a
role during saving, as does the path and savetype.
path : None or str or path object, optional
This defines where the files are saved if the safe_figures_to_folder parameter is True,
quite useful if a lot of data sets are to be printed fast.
If a path is given, this is used. If a string like the (Default) "result_figures" is given,
then a subfolder of this name will be used (and generated if necessary)
relative to self.path. Use an empty string to use the self.path.
If set to None, the location of the plot_func will be used and
a subfolder with title "result_figures" be generated here.
savetype : str or iterable (of str), optional
matplotlib allows the saving of figures in various formats. (Default) "png",
typical and recommendable options are "svg" and "pdf".
print_click_position : bool, optional
if True then the click position is printed for the spectral plots
ds : DataFrame, optional
if None (Default), the program first tests self.ds and if this is not there then self.ds_ori.
This option was introduced to allow plotting of other matrices with the same parameters
Examples
------------
Typically one would call this function empty for an overview. We name the object "ta" so with
>>> ta=pf.TA('testfile.SIA')
This would trigger the plotting of the 4 major plots for an overview.
>>> ta.Plot_RAW()
This would plot only the kinetics.
>>> ta.Plot_RAW(1)
>>> ta.Plot_RAW(plotting = 1)
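As a further illustration, one could plot only the matrix and the kinetics with a different
colour map and save the figures as pdf (all options used here are documented above).
>>> ta.Plot_RAW(plotting = [0, 1], cmap = pf.cm.viridis, savetype = 'pdf')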
'''
path=check_folder(path=path,current_path=self.path)
if self.save_figures_to_folder:
self.figure_path=path
if cmap is None:cmap=self.cmap
if ds is None:
if self.ds is None:
ds=self.ds_ori.copy()
else:
ds=self.ds.copy()
if filename is None: filename=self.filename
if not hasattr(plotting,"__iter__"):plotting=[plotting]
if title is None:
if filename is None:
title=self.filename
else:
title=filename
plot_raw(ds=ds, plotting=plotting, cmap=cmap, title=title, path=path, filename=filename,
intensity_range=self.intensity_range, log_scale=self.log_scale, baseunit=self.baseunit,
timelimits=self.timelimits, scattercut=self.scattercut, bordercut=self.bordercut,
wave_nm_bin=self.wave_nm_bin, rel_wave=self.rel_wave, width=self.wavelength_bin,
time_width_percent=self.time_width_percent, ignore_time_region=self.ignore_time_region,
time_bin=self.time_bin, rel_time=self.rel_time, save_figures_to_folder=self.save_figures_to_folder,
savetype=savetype,plot_type=scale_type,lintresh=self.lintresh, times=times,
print_click_position = print_click_position, data_type = self.data_type,
plot_second_as_energy = plot_second_as_energy, units=self.units, equal_energy_bin = self.equal_energy_bin)
def Save_Plots(self, path = 'result_figures', savetype = None, title = None, filename = None, scale_type = 'symlog',
patches = False, cmap = None):
'''Convenience function that temporarily sets save_figures_to_folder to True and replots everything
Parameters
----------
path : None, str or path, optional
(Default) None, if left on None, then a folder "result_figures" is created in the folder
of the data (self.path)
savetype : str or iterable (of str), optional
matplotlib allows the saving of figures in various formats. (Default) "png",
typical and recommendable options are "svg" and "pdf".
title : None or str, optional
(Default) None, Use this title on all plots. if None, use self.filename
filename : str, optional
(Default) None, Base name for all plots. If None, then self.filename will be used
scale_type : str, optional
"symlog" (Default), "linear", "log" time axis
patches : bool, optional
If True use white patches to label things in the 2d matrices, to save
space for publication
cmap : None or matplotlib color map, optional
is a powerful variable that chooses the colour map applied for all plots. If set to
None (Default) then the self.cmap is used.
As standard I use the color map "jet" from matplotlib. There are a variety of colormaps
available that are very useful. Beside "jet", "viridis" is a good choice as it is well
visible under red-green blindness. Other useful maps are "prism" for high fluctuations
or diverging color maps like "seismic".
See https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html for a comprehensive
selection. In the code the colormaps are imported so if plot_func is imported as pf then
self.cmap=pf.cm.viridis sets viridis as the map to use. Internally the colors are chosen
with the "colm" function. The 2d plots require a continuous color map so if something
else is given, 2d plots are shown automatically with "jet". For all of the 1d plots however
I first select a number of colors before each plot. If cmap is a continuous map then these
are sampled evenly over the colourmap. Manual iterables of colours
cmap=[(1,0,0),(0,1,0),(0,0,1),...] are also accepted, as are vectors or dataframes that
contain the colors as rows. There must of course be sufficient colors present for
the number of lines that will be plotted, so I recommend to provide at least 10 colours
(e.g. your university colors). Colours are always given as a list or tuple with RGB or RGBA
(with the last A being the alpha = transparency). All numbers are between 0 and 1.
If a list/vector/DataFrame is given for the colours they will be used in the order provided.
Examples
---------
>>> ta.Save_Plots()
>>> ta.Save_Plots(patches = True)
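As a further illustration, several file formats can be written in one call,
since savetype accepts a list (as documented above).
>>> ta.Save_Plots(savetype = ['png', 'svg'])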
'''
if cmap is None:cmap=self.cmap
if savetype is None:
savetype=['png']
elif savetype in ['png','pdf','svg']:savetype=[savetype]
elif hasattr(savetype,"__iter__"):savetype=list(savetype)
else:
print('Please specify a single filetype from \'png\',\'pdf\',\'svg\' or a list of those. Nothing was saved')
return False
if cmap is None:cmap=standard_map
origin=self.save_figures_to_folder
self.save_figures_to_folder=True
try:
for t in savetype:
plt.close('all')
self.Plot_RAW(savetype = t, path = path, cmap = cmap, title = title,
scale_type = scale_type, filename = filename) # units and equal_energy_bin are taken from the object inside Plot_RAW
plt.close('all')
print('saved RAW plots type %s to %s'%(t,check_folder(path=path,current_path=self.path)))
except:
print('Saving of Raw plots for filetype %s failed'%t)
try:
for t in savetype:
plt.close('all')
self.Plot_fit_output(savetype=t,path=path,cmap=cmap,title=title,scale_type=scale_type,patches=patches,filename=filename)
plt.close('all')
print('Saved Fit plots of type %s to %s'%(t,check_folder(path=path,current_path=self.path)))
except:
print('Saving of Fit plots for filetype %s failed'%t)
self.save_figures_to_folder=origin
def __Fit_Chirp_inner( self, opt_coeff, initial_fit_coeff = None, params = None, scattercut = None, bordercut = None,
timelimits = None, wave_nm_bin = None, time_bin = None, mod = None, log_fit = None, ds_back_corr = None):
''' Function that calculates a new chirp corrected matrix and returns an error value; the "cost function" for the chirp optimization
'''
fitcoeff = np.array([opt_coeff['p4'].value, opt_coeff['p3'].value, opt_coeff['p2'].value, opt_coeff['p1'].value, opt_coeff['p0'].value])
#fitcoeff = __shift_polynom(fitcoeff, -500) #this was an arbitrary shift of the data by 500 to make the chirp parameter
time = ds_back_corr.index.values.astype('float')#extract the time
ds_new = ds_back_corr.apply(lambda x:np.interp(x = time+np.polyval(fitcoeff, x.name), xp = time, fp = x), axis = 0, raw = False)
re = err_func(paras = params, ds = ds_new, mod = mod, final = False, log_fit = log_fit)
return re
def __Fit_Chirp_outer(self, pardf, results, fit_ds, fit_chirp_iterations, mod, deep_iteration = False):
'''Broken out chirp optimization. Takes the fitted parameters and runs up to 'fit_chirp_iterations' times the loop
(optimise chirp + optimise global fit). After each global iteration the error is compared to the previous one. It continues until no improvement is made or
until 'fit_chirp_iterations' is reached. If the error is reduced by more than a factor of 100 in a single step, it is assumed that something fishy is going on and the fit is restarted with a smaller simplex step size and deep_iteration set to False.
Notes
-----------
deep_iteration uses the previously optimized kinetic parameters as the input into the next fit.
Can be great but can also run away; in general it is not needed and can be emulated by feeding the
results back into the global fit.'''
if pardf.vary.any():
initial_error = [results.residual[0]]
par_into_chirpfit = results.params
else:
initial_error = [err_func(paras = self.par_fit, ds = fit_ds, mod = self.mod, final = False, log_fit = self.log_fit)]
par_into_chirpfit = self.par
par_into_chirpfit['t0'].vary = False
initial_fit_coeff = self.fitcoeff
if len(initial_fit_coeff) == 6:
initial_fit_coeff[4] = self.fitcoeff[4]+self.fitcoeff[5]
initial_fit_coeff = initial_fit_coeff[:5]
chirp_par = lmfit.Parameters()
for i in range(5):
chirp_par.add('p%i'%(4-i), value = initial_fit_coeff[i])
chirp_par['p4'].set(min = chirp_par['p4']-0.5, max = chirp_par['p4']+0.5)
try:#lets send in the background corrected matrix fails if no prior background was done
correction = self.background_par[3]
ds_back_corr = self.ds_ori-correction
except:
ds_back_corr = self.ds_ori
ds_back_corr = sub_ds(ds = ds_back_corr, scattercut = self.scattercut, bordercut = self.bordercut,
timelimits = self.timelimits, wave_nm_bin = self.wave_nm_bin, time_bin = self.time_bin,
equal_energy_bin = self.equal_energy_bin)
print('Before chirpfit the error is:{:.6e}'.format(initial_error[-1]))
#################################################################################################################
#----Chirp fit loop---------------------------------------------------------------------------------
#################################################################################################################
for loop in range(fit_chirp_iterations):
chirpmini = lmfit.Minimizer(self.__Fit_Chirp_inner, chirp_par,
fcn_kws = {'ds_back_corr':ds_back_corr.copy(), 'initial_fit_coeff':initial_fit_coeff,
'params':par_into_chirpfit, 'mod':self.mod, 'log_fit':self.log_fit,
'scattercut':self.scattercut, 'bordercut':self.bordercut,
'timelimits':self.timelimits, 'wave_nm_bin':self.wave_nm_bin,
'time_bin':self.time_bin})
step_size = 5e-2
try:
start = tm.time()
simp = np.array([chirp_par['p4'].value, chirp_par['p3'].value, chirp_par['p2'].value, chirp_par['p1'].value, chirp_par['p0'].value])
simp = np.tile(simp.reshape(5, 1), 6).T
for i in range(5):
if simp[i+1, i] != 0:
if i<4:
simp[i+1, i] = simp[i+1, i]*(step_size)
else:
simp[i+1, i] = simp[i+1, i]+0.1
else:
simp[i+1, i] = 1e-4
#we start by optimizing the chirp with fixed Global fit
chirp_results = chirpmini.minimize('nelder', options = {'maxfev':1e4, 'fatol':initial_error[-1]*1e-6, 'initial_simplex':simp})
end = tm.time()
opt_coeff = chirp_results.params
temp = np.array([opt_coeff['p4'].value, opt_coeff['p3'].value, opt_coeff['p2'].value, opt_coeff['p1'].value, opt_coeff['p0'].value])
#Create the new chirp corrected data
time = ds_back_corr.index.values.astype('float')#extract the time
new_ds = ds_back_corr.copy().apply(lambda x:np.interp(x = time+np.polyval(temp, float(x.name)), xp = time, fp = x), axis = 0, raw = False)
#New Global Fit
fit_ds_loop = sub_ds(ds = new_ds, scattercut = self.scattercut, bordercut = self.bordercut, timelimits = self.timelimits, wave_nm_bin = self.wave_nm_bin, equal_energy_bin = self.equal_energy_bin, time_bin = self.time_bin)
if pardf.vary.any():
mini = lmfit.Minimizer(err_func, par_into_chirpfit, fcn_kws = {'ds':fit_ds_loop, 'mod':mod, 'log_fit':self.log_fit, 'final':False})
results_in_chirp = mini.minimize('nelder', options = {'maxiter':1e5})
initial_error.append(results_in_chirp.residual[0])
else:
initial_error.append(err_func(paras = par_into_chirpfit, ds = fit_ds_loop, mod = mod, final = False, log_fit = self.log_fit))
if initial_error[-1]<initial_error[-2]:
if initial_error[-2]/initial_error[-1]>100:#something fishy going on. lets try again
print('Chirp_loop {:02d} suspicious decrease, reducing step size'.format(loop+1))
initial_error[-1] = initial_error[-2]
step_size = step_size/100
if len(initial_error)>4:
if initial_error[-4] == initial_error[-1]:#we have run this trick now three times, time to break
raise StopIteration
deep_iteration=False
else:
print('Chirp_loop {:02d} resulted in :{:.8e}'.format(loop+1, initial_error[-1]))
if deep_iteration: #This results in a very deep iteration of the starting parameter
if pardf.vary.any():
par_into_chirpfit = results_in_chirp.params
chirp_par = chirp_results.params
else:
raise StopIteration
except StopIteration:
print('iteration did not improve the error, finished chirp looping')
break
except:
print('failure in chirp optimisation in iteration %i'%(loop+1))
import sys
print("Unexpected error:", sys.exc_info()[0])
initial_error.append(initial_error[0])#to avoid that numbers are written
break
#################################################################################################################
#-----------------------------------------end chirp fit loop-------------------------------------------------
#################################################################################################################
if initial_error[-1]<initial_error[0]:#lets check if we improved anything
print('chirp fit improved error by %.2g percent'%(100*(1-initial_error[-1]/initial_error[0])))
if isinstance(temp, list) or isinstance(temp, type(np.arange(1))):
self.fitcoeff = temp
else:
raise
time = ds_back_corr.index.values.astype('float')#extract the time
self.ds = ds_back_corr.apply(lambda x:np.interp(x = time+np.polyval(temp, float(x.name)), xp = time, fp = x), axis = 0, raw = False)
fit_ds = sub_ds(ds = self.ds, scattercut = self.scattercut, bordercut = self.bordercut, timelimits = self.timelimits, wave_nm_bin = self.wave_nm_bin, equal_energy_bin = self.equal_energy_bin, time_bin = self.time_bin)
if pardf.vary.any():
results.params = results_in_chirp.params
return results, fit_ds
def Fit_Global(self, par = None, mod = None, confidence_level = None, use_ampgo = False, fit_chirp = False, fit_chirp_iterations = 10,
multi_project = None, unique_parameter = None, weights = None, same_DAS = False,
dump_paras = False, dump_shapes = False, filename = None, ext_spectra = None):
"""This function is performing a global fit of the data. As embedded object it uses
the parameter control options of the lmfit project as an essential tool.
(my thanks to Matthew Newville and colleagues for creating this fantastic tool)
[M. Newville, T. Stensitzki, D. B. Allen, A. Ingargiola, 2014. DOI: 10.5281/ZENODO.11813.].
What type of fitting is performed is controlled by the settings of the parameters here.
The general fitting follows this routine:
1. create a copy of the Data-Matrix self.ds is created with the shaping parameters
2. Then a Matrix is created that represents the fractional population of each species
(or processes in case of the paral model).
This Matrix contains one entry for each timepoint and represents the kinetic model
based upon the starting parameter. (see below for a description of the models).
This model formation can by done by using a build in or a user supplied function.
(handled in the function "pf.build_c")
-> If an ext_spectra is provided, its intensity is subtracted from the matrix (only for external models)
3. Then the process/species associated spectra for each of the species is calculated
using the linalg.lstsq algorithm from numpy
(https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html)
4. From the convoluted calculated species concentrations and spectra a calculated matrix
is formed (handled in the function "pf.fill_int")
5. The difference between calculated and measured spectra is calculated, point-wise squared
and summed together. (function "err_func" or "err_func_multi" if multiple datasets are fitted)
6. This difference is minimized by iterating 2-4 with changing parameters using an
optimization algorithm (generally nelder-mead simplex)
7. Finally in a last run of 2-5 the final spectra are calculated (using the "final" flag)
and the optimized parameters, the matrices
("A" - measured, "AC" - calculated, "AE" - linear error),
the spectra (always called "DAS") and the concentrations (called "c")
are written into the dictionary "ta.re" together with a few result representations
and other fit outputs. The optimized parameters are also written into ta.par_fit
(as a parameter object) that can be re-used as input into further optimization steps.
All mandatory parameters are in general taken from the internal object (self). The optional parameters control the behaviour of the fitting function.
Parameters
------------------
par : lmfit parameter object, optional
Here another parameter object could be given, overwriting the internal one (Default is self.par)
mod : str or function, optional
Give an extra model selection (Default uses self.mod)
internal models: 'paral','exponential','consecutive','full_consecutive'
see also :meth:`plot_func.build_c` and :meth:`plot_func.err_func`
confidence_level: None or float (0.5-1), optional
If this is changed from None (Default) to a value between 0.5 and 1 the code will
try to calculate the error of the parameter for the fit. For each parameter that
can vary a separate optimization is performed, that attempts to find the upper
and lower bound at which the total error of the re-optimized globally fitted results
reaches the by F-statistics defined confidence bound. See :meth:`plot_func.s2_vs_smin2` for details
on how this level is determined. Careful, this option might run for a very long time,
meaning that it typically takes 50 optimizations per variable parameter (hard coded limit 200).
The confidence level is to be understood such that it defines the e.g. 0.65 * 100% probability
region within which the parameter lies.
use_ampgo : bool, optional
(Default) is False
Changes the optimizer from a pure Nelder-Mead to Ampgo with a local Nelder-Mead.
For using this powerful tool all parameters need to have a "min" and a "max" set.
Typically takes 10-40x longer than a standard optimization, but can, due to its
tunneling algorithm, more reliably find global minima.
see:https://lmfit.github.io/lmfit-py/fitting.html for further details
fit_chirp : bool, optional
(Default) is False
a powerful optimization of the chirp parameter. For this to work the data
needs to include timepoints before and after t=0 and one should have reached
a decent fit of most features in the spectrum. We perform a Nelder-Mead optimisation
of the parameters followed by a Nelder-Mead optimization of the chirp parameters
as one iteration. After each consecutive optimization it is checked if the total error
improved. If not the fit is ended, if yes the maximum number of iterations
'fit_chirp_iterations' is performed. Warning, this goes well in many cases,
but can lead to very strange results in others, always carefully check the results.
I recommend to make a copy of the object before running a chirp optimization.
fit_chirp_iterations : int, optional
maximum number of times the global - chirp loop is repeated.
Typically this iterations run 2-5 times, (Default) is 10
dump_paras : bool, optional
(Default) is False, If True creates two files in the working folder, one with the
currently used parameters created at the end of each optimisation step, and one with
the set of parameters that up to now gave the lowest error. Intended to store
the optimisation results if the fit needs to be interrupted
(if e.g. Ampgo simply needs too long to optimize). Useful option if things are slow;
this parameter also triggers the writing of the fit output to a text file on disk
dump_shapes : bool, optional
this option dumps the concentration matrix and the DAS onto disk for each round of optimization,
mostly useful for multi-project fitting that wants to use the spectral or temporal intensity
filename : None or str, optional
Only used in conjunction with 'dump_paras'. The program uses this filename to dump the
parameter to disk
multi_project : None or list (of TA projects), optional
This switch is triggering the simultaneous optimisation of multiple datasets.
multi_project is (Default) None. It expects an iterable (typically a list) with other
TA projects (like ta) that are then optimised with the same parameter.
This means that all projects get the same parameter object for each iteration
of the fit and return their individual error, which is summed linearly.
The "weights" option allows to give each multi_project a specific weight (number)
that is multiplied to the error. If the weight object has the same number of items
as the multi_project it is assumed that the triggering object (the embedded project)
has the weight of 1, otherwise the first weight is for the embedded project.
The option 'unique_parameter' takes (a list) of parameter that are not
to be shared between the projects (and that are not optimized either)
The intended use of this is to give e.g. the pump power for multiple experiments to
study non linear behaviour. Returned will be only the parameter set for the optimium
combination of all parameter. Internally, we iterate through the projects and calculate
for each project the error for each iteration. Important to note is that currently this
means that each DAS/SAS is calculated independently! For performing the same calculation
with a single DAS, the Matrixes need to be concatenated before the run and an external
function used to create a combined model. As this is very difficult to implement reliably
For general use (think e.g. different pump wavelength) this has to be done manually.
unique_parameter : None or str or list (of strings), optional
only used in conjunction with 'multi_project', it takes (a list) of parameter that
are not to be shared between the projects (and that are not optimized either)
The intended use of this is to give e.g. the pump power for multiple experiments
to study non linear behaviour. (Default) None
same_DAS : bool,optional
changes the fit behavior and uses the same DAS for the optimization.
This means that the ds are stacked before the fill_int rounds. This option is only used in multi-project fitting
weights : list of floats, optional
only used in conjunction with 'multi_project'. The "weights" option allows to
give each multi\_project a specific weight (number) that is multiplied to the error.
If the weight object has the same number of items as the 'multi_project' it is assumed
that ta (the embedded project) has the weight of 1, otherwise the first weight is for the
embedded object
ext_spectra : DataFrame, optional
(Default) is None, if given subtract this spectrum from the data matrix using the intensity
given in "C(t)". This function will only work for external models. The name of the spectral column
must be the same as the name of the column used. If not, the spectrum will be ignored. The spectrum will
be interpolated to the spectral points of the model ds before the subtraction.
Returns
------------------
re : dict
the dictionary "re" attached to the object containing all the matrixes and parameter.
The usual keys are:
"A" Shaped measured Matrix
"AC" Shaped calculated Matrix
"AE" Difference between A and AC = linear error
"DAS" DAS or SAS, labeled after the names given in the function (the columns of c). Care must be taken that the measured intensity is C * DAS, the product. For the exponential model the concentrations are normalized
"c" The concentrations (meaning the evolution of the concentrations over time). Care must be taken that the measured intensity is C * DAS, the product. For the exponential model the concentrations are normalized
"fit_results_rates" DataFrame with the fitted rates (and the confidence intervals if calculated)
"fit_results_times" DataFrame with the fitted decay times (and the confidence intervals if calculated)
"fit_output" The Fit object as returned from lmfit. (This is not saved with the project!)
"error" is the S2, meaning AE**2.sum().sum()
"r2"=1-"error"/(('A'-'A'.mean())**2).sum(), so the residuals scaled with the signal size
par_fit : lmfit parameter object
is written into the object as a lmfit parameter object with the optimized results (that can be use further)
fitcoeff : list, if chirpfit is done
The chirp parameters are updated
ds : DataFrame, if chirpfit is done
A new ds is calculated from ds_ori if ChirpFit is done
The rest is mainly printed on screen.
Examples
--------------------
Non optional:
>>> ta=pf.TA('testfile.SIA') #load data
>>> ta.mod='exponential' #define model
>>> ta.par=lmfit.Parameters() #create empty parameter object
>>> ta.par.add('k0',value=1/0.1,vary=True) #add at least one parameter to optimize
Trigger simple fit:
>>> ta.Fit_Global()
Trigger fit with Chirp Fit:
>>> ta.Fit_Global(fit_chirp=True)
Trigger iterative Chirp fitting with fresh refinement of the Global kinetic parameters:
>>> for i in range(5):
>>> start_error=ta.re['error']
>>> ta.par=ta.par_fit
>>> ta.Fit_Global(fit_chirp=True)
>>> if not ta.re['error'] < start_error:break
Trigger a fit with error calculations
>>> ta.Fit_Global(confidence_level=0.66)
Trigger fit of multiple projects
#use the GUI_open function to open a list of objects (leave empty for using the GUI)
>>> other_projects=pf.GUI_open(['sample_1.hdf5','sample_2.hdf5'],path='Data')
>>> ta.Fit_Global(multi_project=other_projects)
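As an illustration of step 3 of the routine above (a minimal sketch with hypothetical variable
names, not a call into this library): assuming "c" holds the modelled concentration matrix
(time x species) and "A" the shaped data matrix (time x wavelength), the spectra follow from
numpy's least squares solver
>>> import numpy as np
>>> DAS, residues, rank, singular_values = np.linalg.lstsq(c, A, rcond = None)
Individual weights could also be given to the loaded projects (see the "weights" option above;
the embedded project then keeps the weight 1):
>>> ta.Fit_Global(multi_project=other_projects, weights=[0.5, 2])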
For more examples please see the complete documentation under :ref:`Fitting, Parameter optimization and Error estimation`
or :ref:`Fitting multiple measured files at once`
"""
if par is None:par=self.par
if mod is None:mod=self.mod
try:
t0=par['t0']
except:
try:
par.add('t0',value=0,min=-0.5,max=0.5,vary=False)
except:
print("Unexpected error:", sys.exc_info()[0])
try:
resolution=par['resolution']
except:
try:
par.add('resolution',value=0.086,min=0.04,max=0.5,vary=False)
except:
print("Unexpected error:", sys.exc_info()[0])
try:
par['infinite'].value=1
par['infinite'].vary=False
except:
pass
try:
par['background'].value=1
par['background'].vary=False
except:
pass
pardf=par_to_pardf(par)
pardf.loc[np.logical_and(pardf.loc[:,'min'].values<0,pardf.is_rate),'min']=0
pardf.loc[np.logical_and(pardf.loc[:,'max'].values<0,pardf.is_rate),'max']=0
pardf['init_value']=pardf['value']
if dump_paras:
pardf_temp=pardf.copy()
pardf_temp.loc['error','value']=1000
pardf_temp.to_csv('minimal_dump_paras.par')
if self.log_fit:
for key in ['value','min','max']:
pardf.loc[pardf.is_rate,key]=pardf.loc[pardf.is_rate,key].apply(lambda x: np.log10(x))
#create-shape the data to be fitted
fit_ds = sub_ds(ds = self.ds.copy(), scattercut = self.scattercut, bordercut = self.bordercut,
timelimits = self.timelimits, wave_nm_bin = self.wave_nm_bin, equal_energy_bin = self.equal_energy_bin,
time_bin = self.time_bin, ignore_time_region = self.ignore_time_region, drop_scatter = True, drop_ignore = True)
time_label=fit_ds.index.name
energy_label=fit_ds.columns.name
############################################################################
#----Global optimisation------------------------------------------------------
############################################################################
if multi_project is None:
#check if there is any concentration to optimise
if (filename is None) and dump_shapes: filename = self.filename
if pardf.vary.any():#ok we have something to optimize
mini = lmfit.Minimizer(err_func,pardf_to_par(pardf),
fcn_kws={'ds':fit_ds,'mod':mod,'log_fit':self.log_fit,'final':False,
'dump_paras':dump_paras,'filename':filename,'ext_spectra':ext_spectra,
'dump_shapes':dump_shapes})
if not use_ampgo:
if len(pardf[pardf.vary].index)>3:
print('we use adaptive mode for nelder')
results = mini.minimize('nelder',options={'maxiter':1e5,'adaptive':True})
else:
results = mini.minimize('nelder',options={'maxiter':1e5})
else:
results = mini.minimize('ampgo',**{'local':'Nelder-Mead'})
############################################################################
#----Multi project Global optimisation----------------------------------------
##########################################################################
else:
fit_chirp=False #chirp fitting currently only works for single problems
if pardf.vary.any():#ok we have something to optimize lets return the spectra
multi_project.insert(0,self)
mini = lmfit.Minimizer(err_func_multi,pardf_to_par(pardf),fcn_kws={'multi_project':multi_project,'unique_parameter':unique_parameter,
'weights':weights,'mod':mod,'log_fit':self.log_fit,'final':False,
'dump_paras':dump_paras,'filename':filename,'ext_spectra':ext_spectra,
'dump_shapes':dump_shapes,'same_DAS':same_DAS})
if len(pardf[pardf.vary].index)>3:
print('we use adaptive mode for nelder')
results = mini.minimize('nelder',options={'maxiter':1e5,'adaptive':True})
else:
results = mini.minimize('nelder',options={'maxiter':1e5})
#######################################################################
#----Fit chirp----------------------------------------------------------------------------------
####################################################################
if self.ignore_time_region is not None:
if fit_chirp:
print('sorry but currently you can not both ignore a time region and fit the chirp (assuming that you ignore the time-zero region)')
fit_chirp=False
if fit_chirp:
print('Done initial fitting now chirpfit')
results,fit_ds=self.__Fit_Chirp_outer(pardf,results,fit_ds,fit_chirp_iterations,mod)
####################################################################
#------Write results to parameter------------------------
############################################################
if pardf.vary.any():#we actually have optimised something
pardf['value']=par_to_pardf(results.params)['value']
if self.log_fit:
for key in ['value','min','max']:
pardf.loc[pardf.is_rate,key]=pardf.loc[pardf.is_rate,key].apply(lambda x: 10**x)
self.par_fit=pardf_to_par(pardf)
else:
print('ATTENTION: we have not optimized anything but just returned the parameters')
self.par_fit=self.par
if multi_project is None:
re=err_func(paras=self.par_fit,ds=fit_ds,mod=self.mod,final=True,log_fit=self.log_fit,ext_spectra=ext_spectra)
else:
if same_DAS:
re_listen = err_func_multi(paras = self.par_fit, mod = mod, final = True, log_fit = self.log_fit,
multi_project = multi_project, unique_parameter = unique_parameter, same_DAS = same_DAS, weights = weights,
ext_spectra = ext_spectra)
re=re_listen[0]
else:
re = err_func_multi(paras = self.par_fit, mod = mod, final = True, log_fit = self.log_fit,
multi_project = multi_project, unique_parameter = unique_parameter, same_DAS = same_DAS, weights = weights,
ext_spectra = ext_spectra)
############################################################################
#----Estimate errors---------------------------------------------------------------------
############################################################################
if confidence_level is not None:#ok we calculate errors to the level of the confidence_level
if self.log_fit:
for key in ['value','min','max']:
pardf.loc[pardf.is_rate,key]=pardf.loc[pardf.is_rate,key].apply(lambda x: np.log10(x))
if pardf.vary.any():#we actually have optimised something
if 0.6 < confidence_level < 1:
if multi_project is None:
target=s2_vs_smin2(Spectral_points=len(re['A'].columns),Time_points=len(re['A'].index),number_of_species=len(re['DAC'].columns),fitted_kinetic_pars=len(pardf[pardf.vary].index),target_quality=confidence_level)
else:
multi_project.insert(0,self)
# we assume that we have the same number of spectral points but are stacking the times
total_time_points=np.array([len(t.re['A'].index) for t in multi_project]).sum()
target=s2_vs_smin2(Spectral_points=len(re['A'].columns),Time_points=total_time_points,number_of_species=len(re['DAC'].columns),fitted_kinetic_pars=len(pardf[pardf.vary].index),target_quality=confidence_level)
#print(target)
target_s2=re['error']*target
list_of_variable_parameter=pardf[pardf.vary].index.values
conf_limits={}
iterative_calls=0
for fixed_par in list_of_variable_parameter:
conf_limits[fixed_par]={'upper':None,'lower':None}
for i in ['lower','upper']:
print('Trying to find %s, %s confidence limit'%(fixed_par,i))
pardf_local=self.par_fit.copy()
pardf_local[fixed_par].vary=False
par_local=lmfit.Parameters()
if 'lower' in i:#go below min
if par_to_pardf(pardf_local).loc[fixed_par,'is_rate']:
par_local.add(fixed_par,value=pardf_local[fixed_par].value*0.95,min=0,max=pardf_local[fixed_par].value,vary=True)
else:
par_local.add(fixed_par,value=pardf_local[fixed_par].value*0.95,max=pardf_local[fixed_par].value,vary=True)
else: #go above min
par_local.add(fixed_par,value=pardf_local[fixed_par].value*1.05,min=pardf_local[fixed_par].value,vary=True)
def sub_problem(par_local,varied_par,pardf_local,fit_ds=None,mod=None,log_fit=None,multi_project=None,unique_parameter=None,weights=None,target_s2=None,ext_spectra=None,same_DAS=False ):
pardf_local[varied_par].value=par_local[varied_par].value
if par_to_pardf(pardf_local).vary.any():
if multi_project is None:
mini_sub = lmfit.Minimizer(err_func,pardf_local,fcn_kws={'ds':fit_ds,'mod':mod,'log_fit':log_fit,'ext_spectra':ext_spectra})
else:
mini_sub = lmfit.Minimizer(err_func_multi,pardf_local,fcn_kws={'multi_project':multi_project,'unique_parameter':unique_parameter,'weights':weights,
'same_DAS':same_DAS,'mod':mod,'log_fit':log_fit,'ext_spectra':ext_spectra})
if len(pardf[pardf.vary].index)>3:
results_sub = mini_sub.minimize('Nelder',options={'maxiter':1e5,'adaptive':True})
else:
results_sub = mini_sub.minimize('Nelder',options={'maxiter':1e5})
local_error=(results_sub.residual[0]-target_s2)**2
return local_error
else:
if multi_project is None:
return err_func(pardf_local,ds=fit_ds,mod=mod,log_fit=log_fit,ext_spectra=ext_spectra)
else:
return err_func_multi(pardf_local,multi_project=multi_project,unique_parameter=unique_parameter,weights=weights,mod=mod,log_fit=log_fit,ext_spectra=ext_spectra)
try:
mini_local = lmfit.Minimizer(sub_problem,par_local,fcn_kws={'varied_par':fixed_par,'pardf_local':pardf_local,'fit_ds':fit_ds,
'multi_project':multi_project, 'unique_parameter':unique_parameter,'same_DAS':same_DAS,'weights':weights,
'mod':mod,'log_fit':self.log_fit,'target_s2':target_s2,'ext_spectra':ext_spectra})
one_percent_precission=(target-1)*0.01*re['error']
#results_local = mini_local.minimize('least_squares',ftol=one_percent_precission)
results_local = mini_local.minimize(method='nelder',options={'maxiter':100,'fatol':one_percent_precission})
iterative_calls+=results_local.nfev
if results_local.success:
conf_limits[fixed_par][i]=results_local.params[fixed_par].value
else:
print("tried to optimise %i times achieved residual %g with targeted %g"%(results_local.nfev,(np.sqrt(results_local.residual[0])+target_s2),target_s2))
except:
#print("Unexpected error:", sys.exc_info()[0])
print("error in %s at %s limit"%(fixed_par,i))
continue
else:
print("please use a confidence level between 0.6 and 1")
return False
print("it took %i optimisations to get the confidence"%iterative_calls)
############################################################################
#-----prepare frames for storage without confidence and store them------------------------
############################################################################
if pardf.vary.any():
re['fit_output']=results#let's store the fit results in the re_object for now.
if confidence_level is not None:
re['confidence']=conf_limits
pardf.insert(len(pardf.columns),'lower_limit',None)
pardf.insert(len(pardf.columns),'upper_limit',None)
for key in conf_limits.keys():
pardf.loc[key,'lower_limit']=conf_limits[key]['lower']
pardf.loc[key,'upper_limit']=conf_limits[key]['upper']
if self.log_fit:
for key in ['value','min','max','lower_limit','upper_limit']:
for row in pardf[pardf.is_rate].index.values:
try:
pardf.loc[row,key]=10**pardf.loc[row,key]
except:
if pardf.loc[row,key] is None:
continue
elif np.isnan(pardf.loc[row,key]):
continue
else:
print('%s,%s could not be converted and has value'%(row,key))
print(pardf.loc[row,key])
continue
re['confidence']['target-level']='%.1f\n'%((confidence_level)*100)
re['fit_results_rates']=pardf
timedf=pardf_to_timedf(pardf)
re['fit_results_times']=timedf
if same_DAS:
for i,re_local in enumerate(re_listen):
for name in ['fit_output','fit_results_rates','fit_results_times']:
re_listen[i][name]=re[name]
###############################################
##convert energy back to wavelength#############
################################################
if 1:
if self.equal_energy_bin is not None:
if same_DAS:
for i,re_local in enumerate(re_listen):
for name in ['A','AC','AE']:
re_local[name].columns=(scipy.constants.h*scipy.constants.c/(re_local[name].columns.values*1e-9*scipy.constants.electron_volt))
re_local[name].columns.name='wavelength in nm'
re_local[name].sort_index(inplace=True,axis=1,ascending=True)
re_local['DAC'].index=(scipy.constants.h*scipy.constants.c/(re_local['DAC'].index.values*1e-9*scipy.constants.electron_volt))
re_local['DAC'].index.name='wavelength in nm'
re_local['DAC'].sort_index(inplace=True,axis=0,ascending=True)
re_listen[i]=re_local
else:
for name in ['A','AC','AE']:
re[name].columns=(scipy.constants.h*scipy.constants.c/(re[name].columns.values*1e-9*scipy.constants.electron_volt))
re[name].columns.name='wavelength in nm'
re[name].sort_index(inplace=True,axis=1,ascending=True)
re['DAC'].index=(scipy.constants.h*scipy.constants.c/(re['DAC'].index.values*1e-9*scipy.constants.electron_volt))
re['DAC'].index.name='wavelength in nm'
re['DAC'].sort_index(inplace=True,axis=0,ascending=True)
############################################################################
#---print the output---------------------------------------------------
############################################################################
self.re=re
if same_DAS:
re_listen[0]=re
self.multi_projects=re_listen
Result_string='\nFit Results:\n'
if isinstance(mod,type('hello')):
Result_string+='Model Used: %s\n\n'%mod
else:
Result_string+='Model Used: External function\n\n'
if self.ignore_time_region is not None:
try:
Result_string+='the time between %.3f %s and %.3f %s was excluded from the optimization\n\n'%(self.ignore_time_region[0],self.baseunit,self.ignore_time_region[1],self.baseunit)
except:#we got a list
for entry in self.ignore_time_region:
Result_string+='the time between %.3f %s and %.3f %s was excluded from the optimization\n\n'%(entry[0],self.baseunit,entry[1],self.baseunit)
Result_string+='The minimum error is:{:.8e}\n'.format(re['error'])
Result_string+='The minimum R2-value is:{:.8e}\n'.format(re['r2'])
if same_DAS:
Result_string+='The minimum global error is:{:.8e}\n'.format(re['error_total'])
Result_string+='The minimum global R2-value is:{:.8e}\n'.format(re['r2_total'])
if confidence_level is not None:
Result_string+='\nIn Rates with confidence interval to level of %.1f\n\n'%((confidence_level)*100)
Result_string+=pardf.to_string(columns=['value','lower_limit','upper_limit','init_value','vary','min','max','expr'])
Result_string+='\n\nThe rates converted to times with unit %s with confidence interval to level of %.1f\n\n'%(self.baseunit,(confidence_level)*100)
Result_string+=timedf.to_string(columns=['value','lower_limit','upper_limit','init_value','vary','min','max','expr'])
else:
Result_string+='\nIn Rates\n\n'
Result_string+=pardf.to_string(columns=['value','init_value','vary','min','max','expr'])
Result_string+='\n\nThe rates converted to times with unit %s\n\n'%self.baseunit
Result_string+=timedf.to_string(columns=['value','init_value','vary','min','max','expr'])
if same_DAS:
Result_string+='\n\nThe other objects were laid into self.multi_projects as a list with the local re at position 0.\nAssuming that self = ta, write:\nta.re = ta.multi_projects[1] and then ta.Plot_fit_output() to look at the other fits\n'
print(Result_string)
if dump_paras:
with open("Fit_results_print.par", "w") as text_file:
text_file.write(Result_string)
def Plot_fit_output(self, plotting = range(6), path = 'result_figures', savetype = 'png',
evaluation_style = False, title = None, scale_type = 'symlog',
patches = False, filename = None, cmap = None , print_click_position = False,
plot_second_as_energy = True):
'''Plots all the fit output figures. The figures can be called separately
or with a list of plots, e.g. range(6) calls plots 0-5.
This is a wrapper function that triggers the plotting of all the fitted plots.
The parameters in this plot call control the general look and features of the plot.
Which plots are printed is defined by the first parameter (plotting).
The plots are generated from the fitted matrices and as such will only work after a fit was actually
completed (and the "re" dictionary attached to the object).
In all plots the RAW data is plotted as dots and the fit with lines
Contents of the plots
0. DAC contains the assigned spectra for each component of the fit. For
a modelling with independent exponential decays this corresponds to
the "Decay Associated Spectra" (DAS). For all other models this
contains the "Species Associated Spectra" (SAS). According to the
model the separate spectra are labeled by time (process) or name, if
a name is associated in the fitting model. The spectra are shown in
the extracted strength in the right pane and normalized in the left.
Extracted strength means that the measured spectral strength is the
intensity (concentration matrix) times this spectral strength. As the
concentration maxima for all DAS are 1 this corresponds to the
spectral strength for the DAS. (please see the documentation for the
fitting algorithm for further details).
1. summed intensity. All wavelength of the spectral axis are summed for
data and fit. The data is plotted in a number of ways vs linear and
logarithmic axis. This plot is not meant for publication but very
useful to evaluate the quality of a fit.
2. plot kinetics for selected wavelength (see corresponding RAW plot).
3. plot spectra at selected times (see corresponding RAW plot).
4. plots matrix (measured, modelled and error matrix). The parameters are
the same as used for the corresponding RAW plot with the addition of
"error_matrix_amplification" which is a scaling factor multiplied
onto the error matrix. I recommend to play with different "cmap",
"log_scale" and "intensity_scale" to create a pleasing plot.
5. concentrations. In the progress of the modelling/fitting a matrix is
generated that contains the relative concentrations of the species
modelled. This plot is showing the temporal development of these
species. Further details on how this matrix is generated can be found
in the documentation of the fitting function. The modeled spectra are
the convolution of these vectors (giving the time-development) and
the DAS/SAS (giving the spectral development).
Parameters
---------------
plotting : int or iterable (of integers), optional
This parameter determines which figures are plotted
the figures can be called separately with plotting = 1
or with a list of plots (Default) e.g. plotting=range(6) calls plots 0,1,2,3,4,5
The plots have the following numbers:
0. DAS or SAS
1. summed intensity
2. Kinetics
3. Spectra
4. Matrixes
5. Concentrations (the c-object)
The plotting takes all parameter from the "ta" object unless otherwise specified
path : None, str or path object, optional
This defines where the files are saved if the safe_figures_to_folder parameter is True,
quite useful if a lot of data sets are to be printed fast.
If a path is given, this is used. If a string like the (Default) "result_figures" is given,
then a subfolder of this name will be used (and generated if necessary)
relative to self.path. Use an empty string to use the self.path.
If set to None, the location of the plot_func will be used and
a subfolder with title "result_figures" be generated here
savetype : str or iterable (of str), optional
matplotlib allows the saving of figures in various formats. (Default) "png",
typical and recommendable options are "svg" and "pdf".
evaluation_style : bool, optional
True (Default = False) adds a lot of extra information in the plot
title : None or str, optional
"title=None" is in general the filename that was loaded. Setting a
specific title will be used in all plots. To remove the title all
together set an empty string with title=""
scale_type : str, optional
refers to the time-axis and takes, "symlog" (Default)(linear around zero and logarithmic otherwise)
and "lin" for linear and "log" for logarithmic, switching all the time axis to this type
patches : bool, optional
If False (Default) the names "measured" "fitted" "difference" will be placed above the images.
If True, then they will be included into the image (denser)
filename : str, optional
offers to replace the base-name used for all plots (to e.g. specify what sample was used).
if (Default) None is used, the self.filename is used as a base name. The filename plays only a
role during saving, as does the path and savetype
cmap : None or matplotlib color map, optional
is a powerful variable that chooses the colour map applied for all plots. If set to
None (Default) then the self.cmap is used.
As standard I use the color map "jet" from matplotlib. There are a variety of colormaps
available that are very useful. Beside "jet", "viridis" is a good choice as it is well
visible under red-green blindness. Other useful maps are "prism" for high fluctuations
or diverging color maps like "seismic".
See https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html for a comprehensive
selection. In the code the colormaps are imported so if plot_func is imported as pf then
self.cmap=pf.cm.viridis sets viridis as the map to use. Internally the colors are chosen
with the "colm" function. The 2d plots require a continuous color map so if something
else is given, 2d plots are shown automatically with "jet". For all of the 1d plots however
I first select a number of colors before each plot. If cmap is a continuous map then these
are sampled evenly over the colourmap. Manual iterables of colours
cmap=[(1,0,0),(0,1,0),(0,0,1),...] are also accepted, as are vectors or dataframes that
contain the colors as rows. There must of course be sufficient colors present for
the number of lines that will be plotted, so I recommend to provide at least 10 colours
(e.g. your university colors). Colours are always given as a list or tuple with RGB or RGBA
(with the last A being the alpha = transparency). All numbers are between 0 and 1.
If a list/vector/DataFrame is given for the colours they will be used in the order provided.
print_click_position : bool, optional
if True then the click position is printed for the spectral plots
Examples
------------
Typically one would call this function empty for an overview:
After the minimum fit
>>> ta=pf.TA('testfile.SIA')
>>> ta.par=lmfit.Parameters()
>>> ta.par.add('k0',value=1/0.1,vary=True)
>>> ta.Fit_Global()
One usually plots an overview
>>> ta.Plot_fit_output()
>>> ta.Plot_fit_output(plotting=range(6)) #is the same as before
>>> ta.Plot_fit_output(2) #would plot only the kinetics
>>> ta.Plot_fit_output(plotting = 2) #would plot only the kinetics
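As a further illustration, one could plot only the matrices in the denser, publication oriented style
(the patches option is documented above).
>>> ta.Plot_fit_output(4, patches = True)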
'''
try:
re=self.re
except:
print('We need to have fitted something so that we can plot')
return False
path=check_folder(path=path,current_path=self.path)
if self.save_figures_to_folder:
self.figure_path=path
if cmap is None:cmap=self.cmap
if filename is None:filename=self.filename
if title is None:
if filename is None:
title=self.filename
else:
title=filename
if not hasattr(plotting,"__iter__"):plotting=[plotting]
plot_fit_output(self.re, self.ds, cmap = cmap, plotting = plotting, title = title,
path = path, f = filename, intensity_range = self.intensity_range,
log_scale = self.log_scale, baseunit = self.baseunit, timelimits = self.timelimits,
scattercut = self.scattercut, bordercut = self.bordercut,
error_matrix_amplification = self.error_matrix_amplification,
wave_nm_bin = self.wave_nm_bin, rel_wave = self.rel_wave, width = self.wavelength_bin,
rel_time = self.rel_time, save_figures_to_folder = self.save_figures_to_folder,
log_fit = self.log_fit,mod = self.mod, savetype = savetype,
time_width_percent = self.time_width_percent, evaluation_style = evaluation_style,
filename = self.filename, scale_type = scale_type, patches = patches, lintresh = self.lintresh,
print_click_position = print_click_position, ignore_time_region = self.ignore_time_region,
data_type = self.data_type, plot_second_as_energy = plot_second_as_energy, units= self.units,
equal_energy_bin = self.equal_energy_bin)
def Save_data(self, save_RAW = True, save_Fit = True, save_slices = True, save_binned = False,
filename = None, save_fit_results = True, path = 'Data_export', sep = str('\t')):
'''handy function to save the data on disk as dat files.
The RAW labeled files contain the chirp corrected values (self.ds).
The save_slices switch turns on the dump of the separate slices (kinetics and spectra).
Parameters
----------
save_binned : bool, optional
if True the re-binned matrix is also saved.
save_slices : bool, optional
save the kinetics and spectra from the fitted data (with the fits)
sep : str, optional
what symbol is used to separate the numbers (typically either tab or comma).
save_RAW : bool, optional
(Default) True, then the chirp corrected RAW data files are saved
save_Fit : bool, optional
(Default) True, then the fitted matrices and slices are saved
path : None, str or path, optional
(Default) None, if left on None, then a folder "result_figures" is created in the folder
of the data (self.path)
save_fit_results : bool, optional
if True (Default) a neatly formatted file with the fit results is created and stored with the data
filename : str, optional
(Default) None, Base name for all plots. If None, then self.filename will be used
Examples
---------
>>> ta.Save_data()
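As a further illustration, one could also write the re-binned matrix and use comma separated files
(both options are documented above).
>>> ta.Save_data(save_binned = True, sep = ',')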
'''
if filename is None:filename = self.filename.split('.')[0]
if save_RAW:
self.ds.to_csv(check_folder(path = path, current_path = self.path,
filename = filename+'_chirp_corrected_raw_matrix.dat'), sep = sep)
if save_binned:
sub = sub_ds(self.ds, scattercut = self.scattercut, bordercut = self.bordercut,
timelimits = self.timelimits, wave_nm_bin = self.wave_nm_bin,
time_bin = self.time_bin)
sub.to_csv(check_folder(path = path, current_path = self.path,
filename = filename+'_chirp_corrected_rebinned_matrix.dat'), sep = sep)
if save_slices:
sub = sub_ds(ds = self.ds.copy(), wavelength_bin = self.wavelength_bin, wavelength = self.rel_wave)
#sub.columns.name = 'wavelength [nm] in %.0f bins'%self.wavelength_bin
sub.to_csv(check_folder(path = path, current_path = self.path,
filename = filename+'_chirp_corrected_RAW_kinetics.dat'), sep = sep)
sub = sub_ds(ds = self.ds.copy(), times = self.rel_time, time_width_percent = self.time_width_percent,
scattercut = self.scattercut, bordercut = self.bordercut, wave_nm_bin = self.wave_nm_bin)
sub.to_csv(check_folder(path = path, current_path = self.path,
filename = filename+'_chirp_corrected_RAW_Spectra.dat'), sep = sep)
if save_Fit:
try:
self.re.keys()
except:
print('no fit in data')
save_Fit = False
if save_Fit:
self.re['A'].to_csv(check_folder(path = path, current_path = self.path,
filename = filename+'_matrix used as fit input.dat'), sep = sep)
self.re['AC'].to_csv(check_folder(path = path, current_path = self.path,
filename = filename+'_matrix calculated during fit.dat'), sep = sep)
self.re['AE'].to_csv(check_folder(path = path, current_path = self.path,
filename = filename+'_error_matrix calculated during fit.dat'), sep = sep)
if save_slices:
sub = sub_ds(ds = self.re['AC'].copy(), wavelength_bin = self.wavelength_bin, wavelength = self.rel_wave)
#sub.columns.name = 'wavelenth [nm] in %.0f bins'%self.wavelength_bin
sub.to_csv(check_folder(path = path, current_path = self.path,
filename = filename+'_fitted_kinetics.dat'), sep = sep)
sub = sub_ds(ds = self.re['A'].copy(), wavelength_bin = self.wavelength_bin, wavelength = self.rel_wave)
#sub.columns.name = 'wavelenth [nm] in %.0f bins'%self.wavelength_bin
sub.to_csv(check_folder(path = path, current_path = self.path,
filename = filename+'_measured_kinetics.dat'), sep = sep)
sub = sub_ds(ds = self.re['AC'].copy(), times = self.rel_time,
time_width_percent = self.time_width_percent, scattercut = self.scattercut,
bordercut = self.bordercut, wave_nm_bin = self.wave_nm_bin)
sub.to_csv(check_folder(path = path, current_path = self.path,
filename = filename+'_fitted_spectra.dat'), sep = sep)
sub = sub_ds(ds = self.re['A'].copy(), times = self.rel_time,
time_width_percent = self.time_width_percent, scattercut = self.scattercut,
bordercut = self.bordercut, wave_nm_bin = self.wave_nm_bin)
sub.to_csv(check_folder(path = path, current_path = self.path,
filename = filename+'_measured_spectra.dat'), sep = sep)
self.re['DAC'].to_csv(check_folder(path = path, current_path = self.path,
filename = filename+'_DAS-SAS.dat'), sep = sep)
if save_fit_results:
Result_string='\nFit Results:\n'
if isinstance(self.mod,type('hello')):
Result_string+='Model Used: %s\n\n'%self.mod
else:
Result_string+='Model Used: External function\n\n'
if self.ignore_time_region is not None:
Result_string+='the time between %.3f %s and %.3f %s was excluded from the optimization\n'%(self.ignore_time_region[0],self.baseunit,self.ignore_time_region[1],self.baseunit)
Result_string+='The minimum error is:{:.8e}\n'.format(self.re['error'])
Result_string+='The minimum R2-value is:{:.8e}\n'.format(self.re['r2'])
if 'confidence' in self.re:
Result_string+='\nIn Rates with confidence interval to level of %s\n'%self.re['confidence']['target-level']
Result_string+=self.re['fit_results_rates'].to_string(columns=['value','lower_limit','upper_limit','init_value','vary','min','max','expr'])
Result_string+='\n\nThe rates converted to times with unit %s with confidence interval to level of %s\n'%(self.baseunit,self.re['confidence']['target-level'])
Result_string+=self.re['fit_results_times'].to_string(columns=['value','lower_limit','upper_limit','init_value','vary','min','max','expr'])
else:
Result_string+='\nIn Rates\n'
Result_string+=self.re['fit_results_rates'].to_string(columns=['value','init_value','vary','min','max','expr'])
Result_string+='\n\nThe rates converted to times with unit %s\n'%self.baseunit
Result_string+=self.re['fit_results_times'].to_string(columns=['value','init_value','vary','min','max','expr'])
with open(check_folder(path = path, current_path = self.path, filename = filename+'_fit_results_parameter.par'), "w") as text_file:
text_file.write(Result_string)
def Save_Powerpoint(self, save_RAW = True, save_Fit = True, filename = None,
path = 'result_figures', scale_type = 'symlog', title = None, patches = False, cmap=None , savetype = 'pptx'):
'''This function creates two PowerPoint slides. On the first it summarizes the RAW plots and on
the second (if existent) it summarizes the fitted results
Parameters
----------
save_RAW : bool, optional
(Default) True then the first slide with the RAW data is created
save_Fit : bool, optional
(Default) True then the second slide with the Fitted data is created
path : None, str or path, optional
(Default) None, if left on None, then a folder "result_figures" is created in the folder
of the data (self.path)
savetype : str or iterable (of str), optional
triggers the additional creation of a composite file in this format, (Default) "pptx".
matplotlib allows the saving of the individual figures in various formats;
typical and recommendable options are "svg" and "pdf".
title : None or str, optional
(Default) None, Use this title on all plots. if None, use self.filename
filename : str, optional
(Default) None, Base name for all plots. If None, then self.filename will be used
scale_type : str, optional
'symlog' (Default), 'linear', 'log' time axis
patches : bool, optional
If True use white patches to label things in the 2d matrices, to save
space for publication
cmap : None or matplotlib color map, optional
is a powerful variable that chooses the colour map applied for all plots. If set to
None (Default) then the self.cmap is used.
As standard I use the color map "jet" from matplotlib. There are a variety of colormaps
available that are very useful. Beside "jet", "viridis" is a good choice as it is well
visible under red-green blindness. Other useful maps are "prism" for high fluctuations
or diverging color maps like "seismic".
See https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html for a comprehensive
selection. In the code the colormaps are imported so if plot_func is imported as pf then
self.cmap=pf.cm.viridis sets viridis as the map to use. Internally the colors are chosen
with the "colm" function. The 2d plots require a continuous color map so if something
else is given, 2d plots are shown automatically with "jet". For all of the 1d plots however
I first select a number of colors before each plot. If cmap is a continuous map then these
are sampled evenly over the colourmap. Manual iterables of colours
cmap=[(1,0,0),(0,1,0),(0,0,1),...] are also accepted, as are vectors or dataframes that
contain the colors as rows. There must of course be sufficient colors present for
the number of lines that will be plotted, so I recommend to provide at least 10 colours
(e.g. your university colors). Colours are always given as a list or tuple with RGB or RGBA
(with the last A being the alpha = transparency). All numbers are between 0 and 1.
If a list/vector/DataFrame is given for the colours they will be used in the order provided.
Examples
---------
>>> ta.Save_Powerpoint()
>>> ta.Save_Powerpoint(patches = True)
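Assuming your matplotlib installation can write pdf files, a composite summary figure can be saved in addition to the PowerPoint by handing a list to savetype.
>>> ta.Save_Powerpoint(savetype = ['pptx', 'pdf'])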
'''
if isinstance(savetype,type('hello')):savetype=[savetype]
if not hasattr(savetype,"__iter__"):savetype=[savetype]
raw_names=["MAT","SEL","SPEK","SVD"]
raw_names=[check_folder(current_path=self.path, path=path, filename=self.filename.split('.')[0] + "_RAW_"+str(a) +".png") for a in raw_names]
fit_names=["FIG_MAT","SPEC","SEL","SUM","DAC"]
fit_names=[check_folder(current_path=self.path, path=path, filename=self.filename.split('.')[0] + "_" +str(a) +".png") for a in fit_names]
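# note: these name lists are expected to match the png files that Plot_RAW and Plot_fit_output write below; the files are re-read and placed onto the summary slides/figures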
plt.close('all')
origin=self.save_figures_to_folder
if filename is None:
filename=self.filename
filename=filename.split('.')[0]
if save_RAW:
self.save_figures_to_folder=True
self.Plot_RAW(savetype = 'png', scale_type = scale_type, title = title, cmap = cmap, path = path)
plt.close('all')
if save_Fit:
try:
self.save_figures_to_folder=True
self.Plot_fit_output(savetype = 'png', scale_type = scale_type, title = title, patches = patches, cmap = cmap , path = path)
plt.close('all')
except:
save_Fit = False
print('Ran into problems while adding the fit results. Have you fitted anything?')
try:
Result_string='\nFit Results:\n'
if isinstance(self.mod,type('hello')):
Result_string+='Model Used: %s\n\n'%self.mod
else:
Result_string+='Model Used: External function\n\n'
if self.ignore_time_region is not None:
Result_string+='the time between %.3f %s and %.3f %s \n was excluded from the optimization\n'%(self.ignore_time_region[0],self.baseunit,self.ignore_time_region[1],self.baseunit)
Result_string+='The minimum error is:{:.8e}\n'.format(self.re['error'])
Result_string+='The minimum R2-value is:{:.8e}\n'.format(self.re['r2'])
if 'confidence' in self.re:
Result_string+='\nIn Rates with confidence interval to level of %s\n'%self.re['confidence']['target-level']
Result_string+=self.re['fit_results_rates'].to_string(columns=['value','lower_limit','upper_limit','init_value','vary','min','max','expr'])
Result_string+='\n\nThe rates converted to times with unit %s\n with confidence interval to level of %s\n'%(self.baseunit,self.re['confidence']['target-level'])
Result_string+=self.re['fit_results_times'].to_string(columns=['value','lower_limit','upper_limit','init_value','vary','min','max','expr'])
else:
Result_string+='\nIn Rates\n'
Result_string+=self.re['fit_results_rates'].to_string(columns=['value','init_value','vary','min','max','expr'])
Result_string+='\n\nThe rates converted to times with unit %s\n'%self.baseunit
Result_string+=self.re['fit_results_times'].to_string(columns=['value','init_value','vary','min','max','expr'])
except:
pass
if ('pdf' in savetype) or ('png' in savetype) or ('svg' in savetype):
if save_RAW:
fig,ax=plt.subplots(nrows=2,ncols=2,figsize=(10,7.5))
ax[0,1].imshow(mpimg.imread(str(raw_names[0])))
ax[0,0].imshow(mpimg.imread(str(raw_names[1])))
ax[1,0].imshow(mpimg.imread(str(raw_names[2])))
ax[1,1].imshow(mpimg.imread(str(raw_names[3])))
ax[0,0].axis('off');ax[1,0].axis('off');ax[0,1].axis('off');ax[1,1].axis('off')
for entry in savetype:
if entry == "pptx":continue
try:
fig.tight_layout()
fig.savefig(check_folder(path=path,current_path=self.path,filename=self.filename.split('.')[0] + '_RAW-summary.%s'%entry),dpi=600)
except:
print("saving in" + entry +"failed")
if save_Fit:
G = GridSpec(4, 8)
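# layout sketch: the four rows of the left block hold the spectra, kinetics, summed and DAC figures, the upper right block holds the 2d matrix figure and the lower right block the fit-result text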
fig1=plt.figure(figsize=(10,7.5))
ax1=fig1.add_subplot(G[0,:6])
ax2=fig1.add_subplot(G[1,:6])
ax3=fig1.add_subplot(G[2,:6])
ax4=fig1.add_subplot(G[3,:6])
ax5=fig1.add_subplot(G[0:2,5:])
ax6=fig1.add_subplot(G[2:,6:])
ax1.imshow(mpimg.imread(str(fit_names[1])))
ax2.imshow(mpimg.imread(str(fit_names[2])))
ax3.imshow(mpimg.imread(str(fit_names[3])))
ax4.imshow(mpimg.imread(str(fit_names[4])))
ax5.imshow(mpimg.imread(str(fit_names[0])))
ax6.text(0,0,Result_string,fontsize=7,fontweight='normal')
ax1.axis('off');ax2.axis('off');ax3.axis('off');ax4.axis('off');ax5.axis('off');ax6.axis('off')
for entry in savetype:
if entry == "pptx": continue
try:
fig1.tight_layout()
fig1.savefig(check_folder(path=path,current_path=self.path,filename=self.filename.split('.')[0] + '_Fit-summary.%s'%entry),dpi=600)
except:
print("saving in" + entry +"failed")
if ('pptx' in savetype) or ('ppt' in savetype):
left=Inches(0.2)
top=Inches(0.2)
prs = Presentation()
blank_slide_layout = prs.slide_layouts[6]
slide = prs.slides.add_slide(blank_slide_layout)
if save_RAW:
left = top = Inches(0.5)
pic = slide.shapes.add_picture(str(raw_names[0].resolve()), left=left+Inches(4.5), top=top, width=Inches(4.5))
pic = slide.shapes.add_picture(str(raw_names[1].resolve()), left=left, top=top, width=Inches(4.5))
pic = slide.shapes.add_picture(str(raw_names[2].resolve()), left=left, top=top+Inches(3), width=Inches(4.5))
try:
pic = slide.shapes.add_picture(str(raw_names[3].resolve()), left=left+Inches(4.5), top=top+Inches(3), height=Inches(3.4))
except:
pass
if save_Fit:
try:
slide2 = prs.slides.add_slide(blank_slide_layout)
left = top = Inches(0.1)
pic = slide2.shapes.add_picture(str(fit_names[0].resolve()), left=left+Inches(5.5), top=top, height=Inches(3.5))#Matrix
pic = slide2.shapes.add_picture(str(fit_names[1].resolve()), left=left, top=top, height=Inches(2))
pic = slide2.shapes.add_picture(str(fit_names[2].resolve()), left=left, top=top+Inches(2), height=Inches(2))
pic = slide2.shapes.add_picture(str(fit_names[3].resolve()), left=left, top=top+Inches(3.9), height=Inches(1.4))
pic = slide2.shapes.add_picture(str(fit_names[4].resolve()), left=left, top=top+Inches(5.4), height=Inches(2))
text1 = slide2.shapes.add_textbox(left=left+Inches(5.5), top=top+Inches(3.5), width=Inches(4.5), height=Inches(4))
text1.text = Result_string
text1.text_frame.fit_text(font_family=u'Arial', max_size=8, bold=False, italic=False)
except:
print('exited when saving the fit plots')
plt.close('all')
self.save_figures_to_folder=origin
prs.save(check_folder(path=path,current_path=self.path,filename=self.filename.split('.')[0] + '.pptx'))
print('All data was saved to %s'%check_folder(path=path,current_path=self.path))
def Save_project(self, filename=None,path=None):
'''function to dump all the parameters of an analysis into an hdf5 file.
This file contains the ds_ori and all the parameters, including fitting parameters
and results.
One limitation is the fitting model. If the model is built in, so the model is
'exponential' or 'parallel', then the saving works. If an external model is used then the
docstring of the external function is stored, but not the function itself.
Parameters
----------
path : None, str or path, optional
(Default) None, if left on None, then a folder "Data" is created in the folder
of the project (self.path)
filename : str, optional
(Default) None, Base name for all plots. If None, then self.filename will be used
Examples
--------
>>> ta.Save_project()
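A different base name can be given for the saved file (the name here is just an example):
>>> ta.Save_project(filename = 'my_analysis')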
'''
if filename is None:
filename = self.filename
hdf5_name =check_folder(path = path, current_path = self.path, filename = filename.split('.')[0]+'.hdf5')
if os.path.exists(hdf5_name):
try:
os.remove(hdf5_name)
except:
try:
hdf5_name.close()
os.remove(hdf5_name)
except:
print('File exists but can not be deleted')
re_switch = False
with h5py.File(hdf5_name, 'w') as f:
for key in self.__dict__.keys():
if key == 'mod':
if self.__dict__[key] in ['paral','exponential','consecutive','full_consecutive']:
f.create_dataset(name=key, data=self.__dict__[key])
else:
try:
docstring=self.__dict__[key].__doc__
if isinstance(docstring,type('hello')):
f.create_dataset(name=key, data=docstring)
except:
f.create_dataset(name=key, data='external_function_without_docstring')
elif key in ['rel_wave','rel_time']:#need extra, as it is bypassed by the re-switch
f.create_dataset(name=key, data=self.__dict__[key])
elif key[:2] == 're' :
re_switch = True
for key2 in self.__dict__['re']:
if key2 == 'fit_output':continue
data = self.__dict__['re'][key2]
if key2 == 'error':
try:
f.create_dataset(name='re_error', data=data)
except:
print('saving of ' + key2 + ' failed' )
elif isinstance(data, pandas.DataFrame):
pass
else:
try:
f.create_dataset(name='re_' + key2, data=data)
except:
print('saving of ' + key2 + ' failed' )
elif key == 'cmap':
pass
elif key == 'intensity_range':
data=self.__dict__['intensity_range']
if isinstance(data, type(1e-3)):
data=[-data,data]
if data is None:
f.create_dataset(name='intensity_range', data='None')
else:
f.create_dataset(name='intensity_range', data=data)
elif key == 'background_par':
f.create_dataset(name='back', data=self.__dict__['background_par'][3])
elif key in ['par','par_fit']:
df=par_to_pardf(self.__dict__[key])#pandas has a bug and problems handling mixed type columns when saving. So we clean up.
for sub_key in ['min','max','value']:
try:
df[sub_key]=df[sub_key].astype(float)
except:
pass
df['is_rate']=df['is_rate'].astype(bool)
df['vary']=df['vary'].astype(bool)
df['expr']=df['expr'].apply(lambda x:'%s'%x)
df.to_hdf(str(hdf5_name.resolve()), key=key, append=True, mode='r+', format='t')
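# the parameter DataFrames are appended to the same hdf5 file in table format so they can later be read back with pandas.read_hdf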
else:
data = self.__dict__[key]
if data is None:
f.create_dataset(name=key, data='None')
else:
if isinstance(data, pandas.DataFrame):
pass
else:
try:
f.create_dataset(name=key, data=data)
except:
if key == 'path':
pass
elif key == 'figure_path':
pass
else:
print('the saving of %s failed'%key)
if not 'fitcoeff' in f:
try:
f.create_dataset(name='fitcoeff', data=self.fitcoeff)
except:
try:
with open(self.chirp_file,'r') as f2:
fitcoeff=f2.readline()
f.create_dataset(name='fitcoeff', data=fitcoeff)
except:
pass
self.ds_ori.to_hdf(str(hdf5_name.resolve()), key='ds_ori', append=True, mode='r+', format='t')#save_raw_data
if re_switch:
#print('re-switched')
for key in ['A', 'AC', 'AE', 'DAC', 'c']:
self.re[key].to_hdf(str(hdf5_name.resolve()), key='re_' + key, append=True, mode='r+', format='t')
try:
f.close()
except:
pass
print('The project was saved to %s'%check_folder(path = path, current_path = self.path))
def __read_project(self, saved_project=None,current_path=None):
'''function to re-read all the parameters of a previous analysis
from an hdf5 file. current_path is the path that the file should
assume is its "home directory" after successful loading. If not
set we take the filepath at which the file is currently stored as path'''
if saved_project is None:
raise ImportError('We do need a project to import')
if current_path is None:current_path=os.path.dirname(os.path.abspath(saved_project))
try:
import h5py
except:
print('could not import h5py, the current version requires that this is installed. If running Anaconda open the Conda prompt and type: conda install h5py')
data_frame_list=[]
# we have to handle the old and the new type of saving
with h5py.File(saved_project, 'r') as f:
if 're_final_setup_par' in f.keys():old_switch=True
else:old_switch=False
if old_switch:print('we read an old style data_file and directly update it into the new file_type after loading')
with h5py.File(saved_project, 'r') as f:
for key in f.keys():
try:
if "re_" in key[:3]:
if not 're' in self.__dict__.keys():
self.__dict__['re']={}
self.__dict__['re'][key[3:]]=f[key][()]
elif "back" in key[:4]:
rea=f[key][()]
self.__dict__['background_par']=[None,-1,False]
self.__dict__['background_par'].append(rea)
elif "par" in key[:3]:
if old_switch:#old type of saved data
try:
os.remove('temp_file.json')
except:
pass
with open('temp_file.json','w') as g:
g.write(f[key][()])
with open('temp_file.json','r') as g:
self.par=lmfit.Parameters()
self.par.load(g)
try:
os.remove('temp_file.json')
except:
pass
else:#new type of data
raise
else:
read=f[key][()]
if isinstance(read,bytes):
read=f[key].asstr()[()]
elif isinstance(read,type('hello')):
if (read=='None') or (read=='none'):
read=None
elif key in ['bordercut','timelimits','fitcoeff','scattercut']:
read=[float(a) for a in read]
elif key =='intensity_range':
read=[float(a) for a in read]
elif key in ['rel_time','rel_wave']:
read=np.array(read,dtype=np.float64)
elif key in ['scattercut']:
try:
read=[float(a) for a in read]
except:#maybe we have a list of scattercuts
try:
out_listen=[]
for listen in read:
out_listen.append([float(a) for a in listen])
except:#no idea lets see what happens
pass
self.__dict__[key]=read
except:#we'll get an exception every time there is an dataframe
#print('Frame:'+key)
data_frame_list.append(key)
try:
f.close()
except:
pass
for key in data_frame_list:
try:
if "re_" in key[:3]:
#print('re in list')
self.__dict__['re'][key[3:]]=pandas.read_hdf(saved_project,key=key,mode='r',data_columns=True)
elif key in ['ds_ori','par_fit','par']:
self.__dict__[key]=pandas.read_hdf(saved_project,key=key,mode='r',data_columns=True)
else:
print("missing key:" + key)
except:
if key == 'par' and old_switch:pass # we have read it before already and the error is ok
else:print("error in key:" + key)
try:
self.__dict__['re']['fit_results_rates']=self.__dict__['par_fit']
self.__dict__['re']['fit_results_times']=pardf_to_timedf(self.__dict__['re']['fit_results_rates'])
except:
pass
#the par conversion function failed, quickfix
for over_key in ['par_fit','par']:
try:
par_df=self.__dict__[over_key].loc[:,['value','min','max','vary','expr']]
par=lmfit.Parameters()
for key in par_df.index.values:
par.add(key, value=par_df.loc[key,'value'], vary=par_df.loc[key,'vary'], min=par_df.loc[key,'min'], max=par_df.loc[key,'max'])
self.__dict__[over_key]=par
except:
pass
if old_switch:
try:
self.__dict__['re']['fit_results_rates']=par_to_pardf(self.par)
self.__dict__['re']['fit_results_times']=pardf_to_timedf(par_to_pardf(self.par))
self.__dict__['par_fit']=self.par
except:
pass
self.save_figures_to_folder=False
self.path=current_path
if old_switch:#convert project into new type
#clean old files that were read wrong
for key in ['re_final_int_par','re_final_setup_par','re_final_time_par','re_int_error']:
try:
del self.__dict__[key]
except KeyError:
print(f'Key {key} is not in the dictionary')
for key in ['final_int_par','final_setup_par','final_time_par','int_error']:
try:
del self.__dict__['re'][key]
except KeyError:
print(f'Key {key} is not in the dictionary')
try:
self.save_project()
print("project converted into new data type and saved again")
except:
print("project converted ibut could not be saved")
self.path=current_path
for key in ['time_bin','rel_wave','rel_time','scattercut','bordercut','timelimits','intensity_range','wave_nm_bin','wavelength_bin','ignore_time_region']:
try:
if isinstance(self.__dict__[key],bytes):
#print(key + ' set to None')
self.__dict__[key]=None
elif isinstance(self.__dict__[key],str):
if self.__dict__[key]=='None':
self.__dict__[key]=None
except:
continue
try:
self.figure_path=str(self.figure_path)
if 'None' in self.figure_path:
self.figure_path=None
except:
pass
def Copy(self):
'''returns a deep copy of the object.
Examples
--------
>>>ta=plot_func.TA('testfile.hdf5') #open a project
>>>ta1=ta.Copy() #make a copy for some tests or a different fit
'''
import copy
return copy.deepcopy(self)
def Compare_at_time(self, rel_time = None, other = None, fitted = False, norm_window = None,
time_width_percent = None, spectra = None, data_and_fit = False, cmap = None ,
print_click_position = False, linewidth = 1, title='', plot_second_as_energy = True):
'''This function plots multiple spectra into the same figure at a given rel_time (timepoints) and
allows for normalization. Very useful to compare the spectra for different solvents or quenchers, or
e.g. different fits. The ta.time_width_percent parameter defines if this is a
single time (if time_width_percent = 0) or an integrated window.
Only "rel_time" is a mandatory, the rest can be taken from the original project (ta).
The normalization is realized by giving a norm_window
at which the intensity in the triggering object is integrated (in ta.Compare_at_time(other..)
"ta" is the triggering object. The in each of the other curves the same window is
integrated and the curve scaled by this value. Important to note is that this window
does not need to be in the plot. e.g. the normalization can be done at a different time.
Very often one would like to compare the measured spectra at a certain
time to an external spectrum (e.g. spectro-electro-chemistry or steady
state absorption). This can be done by loading a specific spectrum into
a DataFrame and handing this data Frame to the comparision function. The
function can also be used to plot e.g. the measured spectra vs. an
external spectrum without giving any "other" Projects. (very useful for
comparisions).
Parameters
-------------
rel_time : float or list/vector (of floats)
Specify the times where to plot, single value or list/vector of values.
For each entry in rel_time a spectrum is plotted.
If time_width_percent=0 (Default) the nearest measured
timepoint is chosen. For other values see parameter "time_width_percent".
other : TA object or list of those, optional
should be ta.plot_func objects (loaded or copied) and is what
is plotted against the data use a list [ta1,ta2,... ] or generate this
list using the Gui function. See section :ref:`Opening multiple files` in
the documentation
fitted : bool, optional
True/False (Default) - use fitted data instead of raw data.
If True, the fitted datapoints (without interpolation) are used.
This is intended for comparing e.g. different fits
norm_window : None or list/vector (with 4 floats), optional
norm_window Give a list/tuple/vector with 4 entries in the order
[Start - time, End - time, Start - wavelength, End - Wavelength],
see section :ref:`Normalization and Scaling` in the documentation.
If None (Default) no normalization is done.
linewidth : float, optional
linewidth to be used for plotting
time_width_percent : None or float, optional
"rel_time" and "time_width_percent" work together for creating spectral plots at
specific timepoints. For each entry in rel_time a spectrum is plotted.
If however e.g. time_width_percent=10 the region between the timepoint closest
to :math:`timepoint+0.1*timepoint` and :math:`timepoint-0.1*timepoint` is averaged and shown
(and the legend adjusted accordingly). If None (Default) is given, the value is
taken from the triggering object (self.time_width_percent). This is particularly useful for the densely
sampled region close to t=0. Typically for logarithmically recorded kinetics, the
timepoints at later times will be further apart than 10 percent of the value,
but this allows to elegantly combine values around time=0 for better statistics.
This averaging is only applied for the plotting function and not for the fits.
spectra : None or DataFrame, optional
If a DataFrame with the wavelength as index is provided, then the spectrum of each column
is plotted into the differential spectra 1:1 and the column names are used in the legend.
Prior scaling is highly suggested. These spectra are not (in general) scaled with the
norm window. (see examples).
data_and_fit : bool, optional
True or False (Default), choose if for the fitted plot the raw data of the
other projects is to be plotted in addition to the fitted line. For False (Default)
only the fit is plotted.
cmap : None or matplotlib color map, optional
is a powerful variable that chooses the colour map applied for all plots. If set to
None (Default) then self.cmap is used.
As standard I use the color map "jet" from matplotlib. There are a variety of colormaps
available that are very useful. Beside "jet", "viridis" is a good choice as it is well
visible under red-green blindness. Other useful maps are "prism" for high fluctuations
or diverging color maps like "seismic".
See https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html for a comprehensive
selection. In the code the colormaps are imported, so if plot_func is imported as pf then
self.cmap=pf.cm.viridis sets viridis as the map to use. Internally the colors are chosen
with the "colm" function. The 2d plots require a continuous color map, so if something
else is given the 2d plots are automatically shown with "jet". For all of the 1d plots however
I first select a number of colors before each plot. If cmap is a continuous map then these
are sampled evenly over the colourmap. Manual iterables of colours
cmap=[(1,0,0),(0,1,0),(0,0,1),...] are also accepted, as are vectors or DataFrames that
contain the colors as rows. There must of course be sufficient colors present for
the number of lines that will be plotted, so I recommend providing at least 10 colours
(e.g. your university colors). Colours are always given as a list or tuple with RGB or RGBA
(with the last A being the Alpha = transparency). All numbers are between 0 and 1.
If a list/vector/DataFrame is given for the colours they will be used in the order provided.
plot_second_as_energy : bool, optional
For (Default) True a second x-axis is plotted with "eV" as unit
print_click_position : bool, optional
if True then the click position is printed for the spectral plots
Examples
----------
>>> import plot_func as pf
>>> ta = pf.TA("test1.hdf5") #open the original project
Now open a bunch of other projects to compare against
>>> other_projects = pf.GUI_open(project_list = ["file1.SIA", "file2.SIA"])
Typical use is to compare the raw data without normalization at 1ps and 6ps.
>>> ta.Compare_at_time(rel_time = [1,6], other = other_projects)
Compare the fit without normalization at 1ps and 6ps.
>>> ta.Compare_at_time(rel_time = [1,6], other = other_projects, fitted = True)
Compare with a normalization window between 1ps and 2ps and 400nm and 450nm.
>>> norm_window=[1,2,400,450]
>>> ta.Compare_at_time(rel_time = [1,6], other = other_projects, norm_window = norm_window)
Compare the spectrum at 1ps and 6ps with an external spectrum.
>>> ext_spec = pd.read_csv("Ascii_spectrum.dat", sep = ",")
>>> ta.Compare_at_time(rel_time = [1,6], spectra = ext_spec)
Use example - Often there are a lot of different measurements to
compare at multiple timepoints. The normalization is performed at the ground state bleach
at 460 nm and early in time. Then it is better to make a new plot for each
timepoint. The normalization window stays fixed.
>>> plt.close("all") #make some space
>>> norm_window=[0.3,0.5,450,470] #define window in ground state bleach
>>> for t in [0.3,0.5,1,3,10,30]: #iterate over the timepoints
>>> ta.Compare_at_time(rel_time = t, other = other_projects, norm_window = norm_window)
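The averaging window can be widened for better statistics, e.g. averaging 10 percent around each timepoint:
>>> ta.Compare_at_time(rel_time = [1,6], other = other_projects, time_width_percent = 10)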
'''
if self.save_figures_to_folder:self.figure_path=check_folder(path='result_figures',current_path=self.path)
if time_width_percent is None:time_width_percent=self.time_width_percent
if rel_time is None:rel_time=self.rel_time
if other is not None:
if not hasattr(other,'__iter__'):other=[other]
if rel_time is not None:
if not hasattr(rel_time,'__iter__'):rel_time=[rel_time]
else:
rel_time=[1]
if cmap is None:cmap=self.cmap
if fitted:
try:
re=self.re
except:
print("No fitted results present")
return False
if norm_window is not None:
ref_scale=re['A'].loc[norm_window[0]:norm_window[1],norm_window[2]:norm_window[3]].mean().mean()
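# the mean intensity of this (triggering) project in the norm window is the reference; each other project is later divided by its own window mean over this reference (rel_scale/ref_scale)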
fig,ax=plt.subplots(figsize=(10,6),dpi=100)
objects=len(rel_time)*(1+(len(other) if other is not None else 0))
colors=colm(cmap=cmap,k=range(objects))
_=plot_time(re['A'], ax = ax, rel_time = rel_time, time_width_percent = time_width_percent,
baseunit = self.baseunit, lines_are = 'data', cmap = colors[:len(rel_time)], title = '', linewidth = linewidth, subplot= True, scattercut = self.scattercut)
_=plot_time(re['AC'], ax = ax, rel_time = rel_time, time_width_percent = time_width_percent,
baseunit = self.baseunit, lines_are = 'fitted', cmap = colors[:len(rel_time)], title = '', subplot = False, linewidth = linewidth, scattercut = self.scattercut)
handles, labels=ax.get_legend_handles_labels()
lab=['%g %s'%(ent,self.baseunit) + '_' + str(self.filename) for ent in rel_time]
han=handles[:len(rel_time)*2]
for ent in rel_time:
lab.append('%g %s fit'%(ent,self.baseunit) + '_' + str(self.filename))
norm_failed = True
if other is not None:
for i,o in enumerate(other):
try:
re=o.re
except:
print('%s has no fitted results'%o.filename)
continue
if norm_window is not None:
rel_scale=re['A'].loc[norm_window[0]:norm_window[1],norm_window[2]:norm_window[3]].mean().mean()
try:
scaling=(rel_scale/ref_scale)
ax=plot_time(re['AC']/scaling, cmap = colors, ax = ax, rel_time = rel_time,
time_width_percent = time_width_percent, title = '', lines_are = 'fitted',
subplot = True, color_offset = len(rel_time)*(i+1), linewidth = linewidth, scattercut = o.scattercut)
handles, labels=ax.get_legend_handles_labels()
for ent in rel_time:
lab.append('%g %s fit'%(ent,o.baseunit) + '_' + str(o.filename))
for a in handles[-len(rel_time):]:
han.append(a)
if data_and_fit:
ax=plot_time(re['A']/scaling, cmap = self.cmap, ax = ax, rel_time = rel_time,
time_width_percent = time_width_percent, title = o.filename,
baseunit = self.baseunit, lines_are = 'data', subplot = True,
color_offset = len(rel_time)*(i+1), linewidth = linewidth, scattercut = o.scattercut)
handles, labels=ax.get_legend_handles_labels()
for ent in rel_time:
lab.append('%g %s'%(ent,o.baseunit) + '_' + str(o.filename))
for a in handles[-len(rel_time):]:
han.append(a)
norm_failed = False
except:
print('scaling Failed!')
norm_failed = True
else: norm_failed=True
if norm_failed:
ax=plot_time(re['AC'], cmap = colors, ax = ax, rel_time = rel_time,
time_width_percent = time_width_percent, title = '', lines_are = 'fitted',
subplot = True, color_offset = len(rel_time)*(i+1), linewidth = linewidth, scattercut = o.scattercut)
handles, labels=ax.get_legend_handles_labels()
for ent in rel_time:
lab.append('%g %s fit'%(ent,o.baseunit) + '_' + str(o.filename))
for a in handles[-len(rel_time):]:
han.append(a)
if data_and_fit:
ax=plot_time(re['A'], cmap = self.cmap, ax = ax, rel_time = rel_time,
time_width_percent = time_width_percent, title = o.filename,
baseunit = self.baseunit, lines_are = 'data', subplot = True,
color_offset = len(rel_time)*(i+1), linewidth = linewidth, scattercut = o.scattercut)
handles, labels=ax.get_legend_handles_labels()
for ent in rel_time:
lab.append('%g %s'%(ent,o.baseunit) + '_' + str(o.filename))
for a in handles[-len(rel_time):]:
han.append(a)
if not norm_failed:
ax.set_title('compare measured and fitted data at given times\n scaled to t=%g ps : %g ps , wl= %g nm: %g nm'%(norm_window[0],norm_window[1],norm_window[2],norm_window[3]))
else:
ax.set_title('compare measured and fitted data at given times')
ax.set_xlim(re['A'].columns.values[0],re['A'].columns.values[-1])
ax.legend(han, lab ,labelspacing = 0, ncol = 2, columnspacing = 1, handlelength = 1, frameon = False)
else:
if norm_window is not None:
ref_scale=self.ds.loc[norm_window[0]:norm_window[1],norm_window[2]:norm_window[3]].mean().mean()
objects=len(rel_time)*(1+(len(other) if other is not None else 0))
colors=colm(cmap=cmap,k=range(objects))
fig,ax=plt.subplots(figsize=(10,6),dpi=100)
_=plot_time(self.ds, ax = ax, rel_time = rel_time, time_width_percent = time_width_percent,
title = title, lines_are = 'data', scattercut = self.scattercut, bordercut = self.bordercut,
wave_nm_bin = self.wave_nm_bin, cmap = colors, subplot = True, linewidth = linewidth, baseunit=self.baseunit)
if 1:
_=plot_time(self.ds, ax = ax, rel_time = rel_time, time_width_percent = time_width_percent,
title = title, lines_are = 'smoothed', scattercut = self.scattercut, bordercut = self.bordercut,wave_nm_bin = self.wave_nm_bin, cmap = colors, subplot = False, linewidth = linewidth, baseunit = self.baseunit)
handles, labels=ax.get_legend_handles_labels()
lab=['%g %s'%(ent,self.baseunit) + '_' + str(self.filename) for ent in rel_time]
han=handles[:len(rel_time)]
scaling_failed = True
if other is not None:
for i,o in enumerate(other):
if norm_window is not None:
rel_scale=o.ds.loc[norm_window[0]:norm_window[1],norm_window[2]:norm_window[3]].mean().mean()
try:
scaling = (rel_scale/ref_scale)
ax=plot_time(o.ds/scaling, cmap = colors, ax = ax, rel_time = rel_time,
time_width_percent = time_width_percent, title = title, lines_are = 'data',
scattercut = o.scattercut, bordercut = o.bordercut, linewidth = linewidth,
wave_nm_bin = o.wave_nm_bin, subplot = True, color_offset = len(rel_time)*(i+1))
handles, labels=ax.get_legend_handles_labels()
for ent in rel_time:
lab.append('%g %s'%(ent,o.baseunit) + '_' + str(o.filename))
for a in handles[-len(rel_time):]:
han.append(a)
if data_and_fit:
ax=plot_time(o.ds/scaling, cmap = colors, ax = ax, rel_time = rel_time,
time_width_percent = time_width_percent, title = title, lines_are = 'smoothed',
scattercut = o.scattercut, bordercut = o.bordercut, linewidth = linewidth,
wave_nm_bin = o.wave_nm_bin, subplot = True, color_offset = len(rel_time)*(i+1))
scaling_failed=False
except:
print('scaling Failed!')
scaling_failed=True
else:
scaling_failed=True
if scaling_failed:
ax=plot_time(o.ds, cmap = colors, ax = ax, rel_time = rel_time,
time_width_percent = time_width_percent, title = title, lines_are = 'data',
scattercut = o.scattercut, bordercut = o.bordercut, linewidth = linewidth,
wave_nm_bin = o.wave_nm_bin, subplot = True, color_offset = len(rel_time)*(i+1))
handles, labels=ax.get_legend_handles_labels()
for ent in rel_time:
lab.append('%g %s'%(ent,o.baseunit) + '_' + str(o.filename))
for a in handles[-len(rel_time):]:
han.append(a)
if data_and_fit:
ax=plot_time(o.ds, cmap = colors, ax = ax, rel_time = rel_time,
time_width_percent = time_width_percent, title = title, lines_are = 'smoothed',
scattercut = o.scattercut, bordercut = o.bordercut, linewidth = linewidth,
wave_nm_bin = o.wave_nm_bin, subplot = True, color_offset = len(rel_time)*(i+1))
if not scaling_failed:
ax.set_title('compare measured and smoothed data at given times\n scaled to t=%g ps : %g ps , wl= %g nm: %g nm'%(norm_window[0],norm_window[1],norm_window[2],norm_window[3]))
else:
ax.set_title('compare measured and smoothed data at given times')
ax.legend(han, lab, labelspacing = 0, ncol = 2, columnspacing = 1, handlelength = 1, frameon = False)
if self.bordercut is None:
ax.set_xlim(self.ds.columns.values[0],self.ds.columns.values[-1])
else:
ax.set_xlim(self.bordercut)
if spectra is not None:
spectra.plot(ax=ax,legend=False)
handles, labels=ax.get_legend_handles_labels()
han.append(handles[-1])
lab.append(labels[-1])
ax.legend(han, lab ,labelspacing = 0, ncol = 2, columnspacing = 1, handlelength = 1, frameon = False)
if plot_second_as_energy:
ax2=ax.twiny()
ax2.set_xlim(ax.get_xlim())
ax2.set_xticks(ax.get_xticks())
labels=['%.2f'%(scipy.constants.h*scipy.constants.c/(a*1e-9*scipy.constants.electron_volt)) for a in ax2.get_xticks()]
_=ax2.set_xticklabels(labels)
_=ax2.set_xlabel('Energy in eV')
ax.set_zorder(ax2.get_zorder()+1)
fig=plt.gcf()
fig.tight_layout()
if self.save_figures_to_folder:
fig.savefig(check_folder(path=self.figure_path,filename='compare_at_time_%s.png'%'_'.join(['%g'%a for a in rel_time])),bbox_inches='tight')
def Compare_at_wave(self, rel_wave = None, other = None, fitted = False, norm_window = None,
width = None, cmap = None, data_and_fit = False, scale_type = 'symlog', linewidth = 1):
'''This function plots multiple kinetics into the same figure at one or
multiple given wavelengths (rel_wave) and allows for
:ref:`Normalization and Scaling`. Very useful to compare the
kinetics for different quencher concentrations or pump powers,
or e.g. different fits. The parameter width, or the general self.wavelength_bin
which is used if width is None (Default), defines the width of
the spectral window that is integrated and shown.
A normalization window can be given to which all the plotted curves are normalized.
This window does not have to be in the plotted region. See :ref:`Normalization and Scaling`
Parameters
--------------
rel_wave : float or list/vector (of floats)
Specify the wavelength where to plot the kinetics, single value or
list/vector of values (only mandatory entry). For each entry in
rel_wave a kinetic is plotted. 'rel_wave' and 'width'
(in the object called 'wavelength_bin') work together for the creation
of kinetic plots. At each selected wavelength the data between
wavelength+width/2 and wavelength-width/2 is averaged
for each timepoint
other : TA object or list of those, optional
should be ta.plot_func objects (loaded or copied) and is what
is plotted against the data use a list [ta1,ta2,... ] or generate this
list using the Gui function. See section :ref:`Opening multiple files` in
the documentation
fitted : bool, optional
True/False (Default) - use fitted data instead of raw data.
If True, the fitted datapoints (without interpolation) are used.
This is intended for comparing e.g. different fits
norm_window : None or list/vector (with 4 floats), optional
norm_window Give a list/tuple/vector with 4 entries in the order
[Start - time, End - time, Start - wavelength, End - Wavelength],
see section :ref:`Normalization and Scaling` in the documentation.
If None (Default) no normalization is done.
width : float, optional
Specify the width of the spectral window (wavelength-width/2 to wavelength+width/2)
that is integrated. If left at (Default) None the value from the object
(self.wavelength_bin) is used.
data_and_fit : bool, optional
True or False (Default), choose if for the fitted plot the raw data of the
other projects is to be plotted in addition to the fitted line. For False (Default)
only the fit is plotted.
linewidth : float, optional
linewidth to be used for plotting
cmap : None or matplotlib color map, optional
is a powerful variable that chooses the colour map applied for all plots. If set to
None (Default) then self.cmap is used.
As standard I use the color map "jet" from matplotlib. There are a variety of colormaps
available that are very useful. Beside "jet", "viridis" is a good choice as it is well
visible under red-green blindness. Other useful maps are "prism" for high fluctuations
or diverging color maps like "seismic".
See https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html for a comprehensive
selection. In the code the colormaps are imported, so if plot_func is imported as pf then
self.cmap=pf.cm.viridis sets viridis as the map to use. Internally the colors are chosen
with the "colm" function. The 2d plots require a continuous color map, so if something
else is given the 2d plots are automatically shown with "jet". For all of the 1d plots however
I first select a number of colors before each plot. If cmap is a continuous map then these
are sampled evenly over the colourmap. Manual iterables of colours
cmap=[(1,0,0),(0,1,0),(0,0,1),...] are also accepted, as are vectors or DataFrames that
contain the colors as rows. There must of course be sufficient colors present for
the number of lines that will be plotted, so I recommend providing at least 10 colours
(e.g. your university colors). Colours are always given as a list or tuple with RGB or RGBA
(with the last A being the Alpha = transparency). All numbers are between 0 and 1.
If a list/vector/DataFrame is given for the colours they will be used in the order provided.
scale_type : str, optional
is a general setting that influences what time axis will be used for the plots.
"symlog" (linear around zero and logarithmic otherwise, Default), "linear" and "log" are valid options.
Examples
--------
>>> import plot_func as pf
>>> ta = pf.TA('test1.hdf5') #open the original project
Now open a bunch of other projects to compare against
>>> other_projects = pf.GUI_open(project_list = ['file1.SIA', 'file2.SIA'])
Typical use:
Compare the raw data without normalization at 400 nm and 500 nm
>>> ta.Compare_at_wave(rel_wave = [400, 500], other = other_projects)
Compare the quality of the fit data without normalization at 400 nm and 500 nm
>>> ta.Compare_at_wave(rel_wave = [400, 500], other = other_projects, fitted = True)
Compare with a normalization window between 1ps and 2ps and 400nm and 450nm
>>> norm_window=[1,2,400,450]
>>> ta.Compare_at_wave(rel_wave = [400, 500], other = other_projects, norm_window = norm_window)
Use example: Often there are a lot of different measurements to
compare at multiple wavelengths. The normalization is performed at the ground state bleach
at 460 nm and early in time. Then it is better to make a new plot for each
wavelength. The normalization window stays fixed.
>>> plt.close('all') #make some space
>>> norm_window=[0.3,0.5,450,470] #define window in ground state bleach
>>> for wave in [300,400,500,600,700]: #iterate over the wavelengths
>>> ta.Compare_at_wave(rel_wave = wave, other = other_projects, norm_window = norm_window)
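The spectral integration window can also be set explicitly, e.g. to 20 nm around each wavelength:
>>> ta.Compare_at_wave(rel_wave = [400, 500], other = other_projects, width = 20)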
'''
if self.save_figures_to_folder:self.figure_path=check_folder(path='result_figures',current_path=self.path)
if width is None:width=self.wavelength_bin
if rel_wave is None:
rel_wave=self.rel_wave
if other is not None:
if not hasattr(other,'__iter__'):other=[other]
if not hasattr(rel_wave,'__iter__'):
rel_wave=[rel_wave]
if cmap is None:cmap=self.cmap
if fitted:
try:
re=self.re
except:
print("No fitted results present")
return False
if norm_window is not None:
ref_scale = re['A'].loc[norm_window[0]:norm_window[1], norm_window[2]:norm_window[3]].mean().mean()
fig, ax = plt.subplots(figsize = (10, 6), dpi = 100)
colors = colm(cmap = cmap, k = range(len(rel_wave)*(2+(len(other) if other is not None else 0))))
ax = plot1d(re['A'], ax = ax, wavelength = rel_wave, width = width, lines_are = 'data',
cmap = colors, title = '', plot_type = scale_type, linewidth = linewidth)
ax = plot1d(re['AC'], ax = ax, wavelength = rel_wave, width = width, lines_are = 'fitted',
cmap = colors, title = '', subplot = True, plot_type = scale_type, linewidth = linewidth)
#ax = plot1d(re['AC'], ax = ax, wavelength = rel_wave, width = width, lines_are = 'fitted',
# cmap = colors, color_offset = len(rel_wave), title = '', subplot = True, plot_type = scale_type)
hand, labels = ax.get_legend_handles_labels()
lab=['%g nm'%a + '_' + str(self.filename) for a in rel_wave]
for ent in rel_wave:
lab.append('%g nm'%ent + '_' + str(self.filename))
if other is not None:
for i,o in enumerate(other):
i+=1
color_offset=(i+1)*len(rel_wave)
try:
re=o.re
except:
print('%s has no fitted results'%o.filename)
continue
if norm_window is not None:
rel_scale=re['A'].loc[norm_window[0]:norm_window[1],norm_window[2]:norm_window[3]].mean().mean()
try:
scaling=(rel_scale / ref_scale)
if data_and_fit:
ax = plot1d(re['A']/scaling, cmap = colors, ax = ax, wavelength = rel_wave, width = width,
title = '', lines_are = 'data', subplot = True, color_offset = color_offset,
plot_type = scale_type, linewidth = linewidth)
ax = plot1d(re['AC']/scaling, cmap = colors, ax = ax, wavelength = rel_wave, width = width,
title = '', lines_are = 'fitted', subplot = True, color_offset = color_offset,
plot_type = scale_type, linewidth = linewidth)
except:
print('scaling Failed!')
if data_and_fit:
ax = plot1d(re['A'], cmap = colors, ax = ax, wavelength = rel_wave, width = width,
title = '', lines_are = 'data', subplot = True, color_offset = color_offset,
plot_type = scale_type, linewidth = linewidth)
ax = plot1d(re['AC'], cmap = colors, ax = ax, wavelength = rel_wave, width = width,
title = '', lines_are = 'fitted', subplot = True, color_offset = color_offset,
plot_type = scale_type, linewidth = linewidth)
else:
if data_and_fit:
ax = plot1d(re['A'], cmap = colors, ax = ax, wavelength = rel_wave, width = width,
title = '', lines_are = 'data', subplot = True, color_offset = color_offset,
plot_type = scale_type, linewidth = linewidth)
ax = plot1d(re['AC'], cmap = colors, ax = ax, wavelength = rel_wave, width = width,
title = '', lines_are = 'fitted', subplot = True, color_offset = color_offset,
plot_type = scale_type, linewidth = linewidth)
for ent in rel_wave:
if data_and_fit:
lab.append('%g nm'%ent + '_' + str(o.filename))
lab.append('%g nm'%ent + '_' + str(o.filename))
handles, labels=ax.get_legend_handles_labels()
if data_and_fit:
for a in handles[-2*len(rel_wave):]:
hand.append(a)
else:
for a in handles[-len(rel_wave):]:
hand.append(a)
if norm_window is not None:
ax.set_title('compare measured and fitted data at given wavelength \n scaled to t=%g ps : %g ps , wl= %g nm: %g nm'%(norm_window[0],norm_window[1],norm_window[2],norm_window[3]))
else:
ax.set_title('compare measured and fitted data at given wavelength')
ax.set_xlim(re['A'].index.values[0],re['A'].index.values[-1])
ax.legend(hand,lab)
else:
fig, ax = plt.subplots(figsize = (10, 6), dpi = 100)
colors = colm(cmap = cmap, k = range(len(rel_wave)*(2+(len(other) if other is not None else 0))))
if norm_window is not None:
ref_scale = self.ds.loc[norm_window[0]:norm_window[1], norm_window[2]:norm_window[3]].mean().mean()
ax = plot1d(self.ds, cmap = colors, ax = ax, wavelength = rel_wave, width = width, title = self.filename,
baseunit = self.baseunit, lines_are = 'data', scattercut = self.scattercut,
bordercut = self.bordercut, subplot = False, color_offset = 0, timelimits = self.timelimits,
intensity_range = self.intensity_range, plot_type = scale_type, linewidth = linewidth)
ax = plot1d(self.ds, cmap = colors, ax = ax, wavelength = rel_wave, width = width, title = self.filename,
baseunit = self.baseunit, lines_are = 'smoothed', scattercut = self.scattercut,
bordercut = self.bordercut, subplot = False, color_offset = 0, timelimits = self.timelimits,
intensity_range = self.intensity_range, plot_type = scale_type, linewidth = linewidth)
if 1:
handles, labels=ax.get_legend_handles_labels()
lab=['%g nm'%a + '_' + str(self.filename) for a in rel_wave]
hand=handles[len(rel_wave):]
if other is not None:
for i,o in enumerate(other):
i+=1
color_offset=(i+1)*len(rel_wave)
if norm_window is not None:
rel_scale=o.ds.loc[norm_window[0]:norm_window[1],norm_window[2]:norm_window[3]].mean().mean()
try:
scaling=(rel_scale/ref_scale)
ax = plot1d(o.ds/scaling, cmap = colors, ax = ax, wavelength = rel_wave, width = width,
title = o.filename, baseunit = self.baseunit, timelimits = self.timelimits,
lines_are = 'data', scattercut = self.scattercut, bordercut = self.bordercut,
subplot = True, color_offset = color_offset,
intensity_range = self.intensity_range, plot_type = scale_type, linewidth = linewidth)
ax = plot1d(o.ds/scaling, cmap = colors, ax = ax, wavelength = rel_wave, width = width,
title = o.filename, baseunit = self.baseunit, timelimits = self.timelimits,
lines_are = 'smoothed', scattercut = self.scattercut, bordercut = self.bordercut,
subplot = True, color_offset = color_offset,
intensity_range = self.intensity_range, plot_type = scale_type, linewidth = linewidth)
except:
print('scaling failed')
ax = plot1d(o.ds, cmap = colors, ax = ax, wavelength = rel_wave, width = width,
title = o.filename, baseunit = self.baseunit, timelimits = self.timelimits,
lines_are = 'data', scattercut = self.scattercut, bordercut = self.bordercut,
subplot = True, color_offset = color_offset,
intensity_range = self.intensity_range, plot_type = scale_type, linewidth = linewidth)
ax = plot1d(o.ds, cmap = colors, ax = ax, wavelength = rel_wave, width = width,
title = o.filename, baseunit = self.baseunit, timelimits = self.timelimits,
lines_are = 'smoothed', scattercut = self.scattercut, bordercut = self.bordercut,
subplot = True, color_offset = color_offset,
intensity_range = self.intensity_range, plot_type = scale_type, linewidth = linewidth)
else:
ax = plot1d(o.ds, cmap = colors, ax = ax, wavelength = rel_wave, width = width, title = o.filename,
baseunit = self.baseunit, timelimits = self.timelimits, lines_are = 'data',
scattercut = self.scattercut, bordercut = self.bordercut, subplot = True,
color_offset = color_offset, intensity_range = self.intensity_range,
plot_type = scale_type, linewidth = linewidth)
ax = plot1d(o.ds, cmap = colors, ax = ax, wavelength = rel_wave, width = width, title = o.filename,
baseunit = self.baseunit, timelimits = self.timelimits, lines_are = 'smoothed',
scattercut = self.scattercut, bordercut = self.bordercut, subplot = True,
color_offset = color_offset, intensity_range = self.intensity_range,
plot_type = scale_type, linewidth = linewidth)
for ent in ['%g nm'%a + '_' + str(o.filename) for a in rel_wave]:
lab.append(ent)
handles, labels=ax.get_legend_handles_labels()
for ha in handles[-len(rel_wave):]:
hand.append(ha)
ax.set_title('compare measured and smoothed data at given wavelength')
if norm_window is not None:
ax.set_title('compare measured and smoothed data at given wavelength \n scaled to t=%g ps : %g ps , wl= %g nm: %g nm'%(norm_window[0],norm_window[1],norm_window[2],norm_window[3]))
ax.legend(hand,lab)
if self.save_figures_to_folder:
fig.savefig(check_folder(path=self.figure_path,filename='compare_at_wave_%s.png'%'_'.join(['%g'%a for a in rel_wave])),
bbox_inches='tight')
return ax
def Compare_DAC(self, other = None, spectra = None, separate_plots = False, cmap = None):
'''This is a convenience function to plot multiple extracted spectra
(DAS or species associated) into the same figure or into a separate figure
each. "other" should be ta.plot_func objects (loaded or copied). By
default it plots all into the same window. If all projects have the same
number of components one can activate "separate_plots" and have each
separated (in the order created in the projects).
The "spectra" parameter allows as before the inclusion of an external
spectrum. "other" is optional and I use this function often to compare
species associated spectra with one or multiple steady state spectra.
Parameters
--------------
other : TA object or list of those, optional
should be ta.plot_func objects (loaded or copied) and is what
is plotted against the data use a list [ta1,ta2,... ] or generate this
list using the Gui function. See section :ref:`Opening multiple files` in
the documentation
spectra : None or DataFrame, optional
If a DataFrame with the wavelength as index is provided, then the spectrum of each column
is plotted into the differential spectra 1:1 and the column names are used in the legend
Prior scaling is highly suggested. These spectra are not (in general) scaled with the
norm window. (see examples)
separate_plots : bool, optional
True or False (Default), separate_plots is the switch that decides if a single axis or
multiple axes are used. This option will result in a crash unless all objects have the
same number of DAS/SAS components
cmap : None or matplotlib color map, optional
is a powerful variable that chooses the colour map applied for all plots. If set to
None (Default) then self.cmap is used.
As standard I use the color map "jet" from matplotlib. There are a variety of colormaps
available that are very useful. Beside "jet", "viridis" is a good choice as it is well
visible under red-green blindness. Other useful maps are "prism" for high fluctuations
or diverging color maps like "seismic".
See https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html for a comprehensive
selection. In the code the colormaps are imported, so if plot_func is imported as pf then
self.cmap=pf.cm.viridis sets viridis as the map to use. Internally the colors are chosen
with the "colm" function. The 2d plots require a continuous color map, so if something
else is given the 2d plots are automatically shown with "jet". For all of the 1d plots however
I first select a number of colors before each plot. If cmap is a continuous map then these
are sampled evenly over the colourmap. Manual iterables of colours
cmap=[(1,0,0),(0,1,0),(0,0,1),...] are also accepted, as are vectors or DataFrames that
contain the colors as rows. There must of course be sufficient colors present for
the number of lines that will be plotted, so I recommend providing at least 10 colours
(e.g. your university colors). Colours are always given as a list or tuple with RGB or RGBA
(with the last A being the Alpha = transparency). All numbers are between 0 and 1.
If a list/vector/DataFrame is given for the colours they will be used in the order provided.
Examples
--------
>>> import plot_func as pf
>>> ta = pf.TA('test1.hdf5') #open the original project,
>>> #this MUST contain a fit, otherwise this will raise an error
Now open a bunch of other projects to compare against,
>>> #compare in a single window
>>> other_projects = pf.GUI_open(project_list = ['file1.hdf5', 'file2.hdf5'])
>>> ta.Compare_DAC(other = other_projects)
>>> #compare in separate windows,
>>> #the other projects must have the same number of components
>>> ta.Compare_DAC(other = other_projects, separate_plots = True)
Compare the DAC to an external spectrum
>>> ext_spec = pd.read_csv('Ascii_spectrum.dat', sep = ',')
>>> ta.Compare_DAC(spectra = ext_spec) #compare just the current solution
>>> ta.Compare_DAC(spectra = ext_spec, other = other_projects) #compare multiple
'''
if self.save_figures_to_folder:self.figure_path = check_folder(path = 'result_figures', current_path = self.path)
if other is not None:
if not hasattr(other, '__iter__'):other = [other]
try:
re = self.re.copy()
except:
print("No fitted results present")
return False
if cmap is None:cmap = self.cmap
species=re['DAC'].columns.values
if other is None:
col = range(len(re['DAC'].columns.values))
colors = colm(cmap = cmap, k = col)
else:
re['DAC'].columns = [self.filename + '\n' + '%s'%a for a in re['DAC'].columns]
if separate_plots:
colors = colm(cmap = cmap, k = np.arange(len(other)+1))
else:
colors = colm(cmap = cmap, k = np.arange((len(other)+1)*len(species)))
DAC = re['DAC']
hand=[]
if separate_plots:
n_cols = int(np.ceil(len(re['DAC'].columns)/2))
col = [colors[0] for a in range(len(re['DAC'].columns))]
if self.scattercut is None:
ax = DAC.plot(subplots = separate_plots, figsize = (12, 10), layout = (n_cols, 2),
legend = False, color = col, sharex = False)
a=ax.ravel()
handles,labels=a[0].get_legend_handles_labels()
hand.append(handles[-1])
elif isinstance(self.scattercut[0], numbers.Number):
ax = DAC.loc[:self.scattercut[0], :].plot(subplots = separate_plots, figsize = (12, 10), layout = (n_cols, 2),
legend = False, color = col, sharex = False)
a=ax.ravel()
handles,labels=a[0].get_legend_handles_labels()
hand.append(handles[-1])
DAC_cut=DAC.loc[self.scattercut[1]:, :]
for i,am in enumerate(DAC_cut.columns):
DAC_cut.iloc[:,i].plot(ax = a[i], legend = False, color = col)
else:
scattercut = flatten(self.scattercut)
for i in range(len(scattercut)//2+1):
if i == 0:
ax = DAC.loc[:scattercut[0], :].plot(subplots = separate_plots, figsize = (12, 10),
layout = (n_cols, 2), legend = False, color = col, sharex = False)
a=ax.ravel()
handles,labels=a[0].get_legend_handles_labels()
hand.append(handles[-1])
elif i<(len(scattercut)/2):
for j,am in enumerate(ax):
DAC.loc[scattercut[2*i-1]:scattercut[2*i], :].plot(ax = a[j], legend = False, color = col, label = '_nolegend_')
else:
for j,am in enumerate(ax):
DAC.loc[scattercut[-1]:, :].plot(ax = a[j], legend = False, color = col, label = '_nolegend_')
else:
if self.scattercut is None:
ax = DAC.plot(subplots = separate_plots, figsize = (16, 8), legend = False, color = colors[:len(species)], label = '_nolegend_')
elif isinstance(self.scattercut[0], numbers.Number):
ax = DAC.loc[:self.scattercut[0], :].plot(subplots = separate_plots, figsize = (16, 8), legend = False, color = colors[:len(species)], label = '_nolegend_')
ax = DAC.loc[self.scattercut[1]:, :].plot(ax=ax, subplots = separate_plots, figsize = (16, 8), legend = False, color = colors[:len(species)], label = '_nolegend_')
else:
scattercut = flatten(self.scattercut)
for i in range(len(scattercut)//2+1):
if i == 0:
ax = DAC.loc[:scattercut[0], :].plot(subplots = separate_plots, figsize = (16, 8), legend = False, color = colors[:len(species)], label = '_nolegend_')
elif i<(len(scattercut)/2):
ax = DAC.loc[scattercut[2*i-1]:scattercut[2*i], :].plot(ax=ax, subplots = separate_plots, figsize = (16, 8), legend = False, color = colors[:len(species)], label = '_nolegend_')
else:
ax = DAC.loc[scattercut[-1]:, :].plot(ax=ax, subplots = separate_plots, figsize = (16, 8), legend = False, color = colors[:len(species)], label = '_nolegend_')
if other is not None:
for i,o in enumerate(other):
try:
re=o.re.copy()
except:
print('%s has no fitted results'%o.filename)
continue
re['DAC'].columns=[o.filename + '\n' + '%s'%a for a in re['DAC'].columns]
if separate_plots:
col=[colors[i+1] for a in range(len(re['DAC'].columns))]
for j,am in enumerate(re['DAC'].columns):
if o.scattercut is None:
re['DAC'].iloc[:,j].plot(subplots=False,ax=a[j],legend=False,color=col[i])
if j==0:
handles,labels=a[0].get_legend_handles_labels()
hand.append(handles[-1])
elif isinstance(o.scattercut[0], numbers.Number):
re['DAC'].iloc[:,j].loc[:o.scattercut[0]].plot(subplots=False,ax=a[j],legend=False,color=col[i])
if j==0:
handles,labels=a[0].get_legend_handles_labels()
hand.append(handles[-1])
re['DAC'].iloc[:,j].loc[o.scattercut[1]:].plot(subplots=False,ax=a[j],legend=False,color=col[i],label = '_nolegend_')
else:
scattercut = flatten(o.scattercut)
for m in range(len(scattercut)//2+1):
if m == 0:
re['DAC'].iloc[:,j].loc[:scattercut[0]].plot(subplots=False,ax=a[j],legend=False,color=col[i])
if j==0:
handles,labels=a[j].get_legend_handles_labels()
hand.append(handles[-1])
elif m<(len(scattercut)/2):
re['DAC'].iloc[:,j].loc[scattercut[2*m-1]:scattercut[2*m]].plot(subplots=False,ax=a[j],legend=False,color=col[i],label = '_nolegend_')
else:
re['DAC'].iloc[:,j].loc[scattercut[-1]:].plot(subplots=False,ax=a[j],legend=False,color=col[i],label = '_nolegend_')
a[j].set_xlabel('Wavelength in nm')
a[j].set_ylabel('Spectral strength in arb. units')
a[j].legend(fontsize=8,frameon=False)
else:
dacs=len(re['DAC'].columns)
col=colors[(i+1)*dacs:(i+2)*dacs]
DAC=re['DAC']
if o.scattercut is None:
ax = DAC.plot(subplots=separate_plots,ax=ax,legend=False,color=colors[(i+1)*len(species):(i+2)*len(species)])
elif isinstance(o.scattercut[0], numbers.Number):
ax = DAC.loc[:o.scattercut[0], :].plot(subplots=separate_plots,ax=ax,legend=False,color=colors[(i+1)*len(species):(i+2)*len(species)])
DAC.loc[o.scattercut[1]:, :].plot(subplots=separate_plots,ax=ax,legend=False,color=colors[(i+1)*len(species):(i+2)*len(species)])
else:
scattercut = flatten(o.scattercut)
for m in range(len(scattercut)//2+1):
if m == 0:
ax = DAC.loc[:scattercut[0], :].plot(subplots=separate_plots,ax=ax,legend=False,color=colors[(i+1)*len(species):(i+2)*len(species)])
elif m<(len(scattercut)//2):
ax = DAC.loc[scattercut[2*m-1]:scattercut[2*m], :].plot(subplots=separate_plots,ax=ax,legend=False,color=colors[(i+1)*len(species):(i+2)*len(species)])
else:
ax = DAC.loc[scattercut[-1]:, :].plot(subplots=separate_plots,ax=ax,legend=False,color=colors[(i+1)*len(species):(i+2)*len(species)])
ax.set_xlabel('Wavelength in nm')
ax.set_ylabel('Spectral strength in arb. units')
ax.legend(fontsize=8,frameon=False)
if not hasattr(ax,'__iter__'):ax=np.array([ax])
if spectra is not None:
for a in ax:
spectra.plot(ax=a,subplots=separate_plots)
fig=(ax.ravel())[0].figure
if separate_plots:
fig.set_size_inches(12,10)
axes_number=fig.get_axes()
names=[self.filename]
if other is not None:
for o in other:
names.append(o.filename)
for i,ax in enumerate(axes_number):
try:
nametemp=['%s'%species[i] + ' - ' + a for a in names]
ax.legend(hand,nametemp)
except:
pass
else:
ax=fig.get_axes()[0]
names=[self.filename]
if other is not None:
for o in other:
names.append(o.filename)
handles,labels=ax.get_legend_handles_labels()
nametemp=[]
try:
for a in names:
for b in species:
nametemp.append('%s'%b + ' - ' + a)
ax.legend(handles,nametemp)
except:
pass
fig.set_size_inches(16,8)
fig.tight_layout()
if self.save_figures_to_folder:
fig.savefig(check_folder(path=self.figure_path,filename='compare_DAC.png'),bbox_inches='tight') | PypiClean |
/Axelrod-4.13.0.tar.gz/Axelrod-4.13.0/docs/discussion/axelrods_tournaments.rst | Background to Axelrod's Tournament
==================================
`In the 1980s, professor of Political Science Robert Axelrod ran a tournament inviting strategies from collaborators all over the world for the Iterated Prisoner's Dilemma <http://en.wikipedia.org/wiki/The_Evolution_of_Cooperation#Axelrod.27s_tournaments>`_.
Another nice write-up of Axelrod's work and this tournament was put together by `Artem Kaznatcheev <https://plus.google.com/101780559173703781847/posts>`_ `here <https://egtheory.wordpress.com/2015/03/02/ipd/>`_.
The Prisoner's Dilemma
----------------------
The `Prisoner's dilemma <http://en.wikipedia.org/wiki/Prisoner%27s_dilemma>`_ is the simple two player game shown below:
+----------+---------------+---------------+
| | Cooperate | Defect |
+==========+===============+===============+
|Cooperate | (3,3) | (0,5) |
+----------+---------------+---------------+
|Defect | (5,0) | (1,1) |
+----------+---------------+---------------+
If both players cooperate they each go to prison for 2 years, which corresponds to a utility of 3.
If one cooperates and the other defects, the defector does not go to prison while the cooperator goes to prison for 5 years; the cooperator receives a utility of 0 and the defector a utility of 5.
If both defect, they both go to prison for 4 years, which corresponds to a utility of 1.
.. note:: Years in prison do not translate into utility directly. The formula is U = 5 - Y for Y in [0, 5], where ``U`` is the utility and ``Y`` is the number of years in prison. This follows Axelrod's original scoring.
By inspecting the best response to each possible action of the other player, it is immediate to see that the Nash equilibrium of this game is for both players to defect.
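The payoffs above can also be written down as a small lookup table.
The following sketch (plain Python, for illustration only and not part of this library) is one way to express the stage game::

    # Stage-game payoffs (player 1, player 2) taken from the table above
    C, D = "Cooperate", "Defect"
    PAYOFFS = {
        (C, C): (3, 3),
        (C, D): (0, 5),
        (D, C): (5, 0),
        (D, D): (1, 1),
    }

    def payoff(action_1, action_2):
        """Return the pair of utilities for a single round."""
        return PAYOFFS[(action_1, action_2)]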
The Iterated Prisoner's Dilemma
-------------------------------
We can use the basic Prisoner's Dilemma as a *stage* game in a repeated game.
Players now aim to maximise their total utility (equivalently, to minimise their total years in prison) over repeated rounds of the game.
Strategies can take into account both players' histories and so can take the form:
"I will cooperate unless you defect 3 times in a row at which point I will defect forever."
Axelrod ran such a tournament (twice) and invited strategies from anyone who would contribute.
The tournament was a round robin and the winner was the strategy with the lowest total time in prison (equivalently, the highest total utility).
This tournament has been used to study how cooperation can evolve from a very simple set of rules.
This is mainly because the winner of both tournaments was 'tit for tat': a strategy that would never defect first (referred to as a 'nice' strategy).
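As a rough illustration (again plain Python, independent of this library), the sketch below plays a fixed number of rounds between 'tit for tat' and an always-defecting opponent, scoring each round with the ``payoff`` helper defined earlier::

    def tit_for_tat(own_history, opponent_history):
        """Cooperate on the first move, then copy the opponent's last move."""
        return C if not opponent_history else opponent_history[-1]

    def always_defect(own_history, opponent_history):
        return D

    def play_match(strategy_1, strategy_2, turns=10):
        history_1, history_2 = [], []
        score_1 = score_2 = 0
        for _ in range(turns):
            action_1 = strategy_1(history_1, history_2)
            action_2 = strategy_2(history_2, history_1)
            utility_1, utility_2 = payoff(action_1, action_2)
            history_1.append(action_1)
            history_2.append(action_2)
            score_1 += utility_1
            score_2 += utility_2
        return score_1, score_2

    # Tit for tat loses only the first round, after which both players defect:
    # over 10 turns this returns (9, 14).
    print(play_match(tit_for_tat, always_defect))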
| PypiClean |
/DOMESTIC_CATS-1.0.4-py3-none-any.whl/Domestic-Cats/RFC.py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
pd.set_option('display.max_rows', 1000)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
#Read in Table of Merged Features
known_transients = pd.read_csv("Data_Input/Known_Transient_Types.csv", delimiter =",")
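#Column means of the numeric feature columns (First Mag, Galactic Latitude, SGSCORE, Color); reused below to fill missing values in the new transient data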
firstmag_mean,GL_mean,SGSCORE_mean,color_mean = np.mean(known_transients)
#Random Forest Classifier
feature_names = ['First Mag','Galactic Latitude','SGSCORE','Color']
X=known_transients[feature_names]
y=known_transients['Type'] # Labels
# Split dataset into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3) # 70% training and 30% test
clf=RandomForestClassifier(n_estimators=50)
#Train the model using the training sets y_pred=clf.predict(X_test)
clf.fit(X_train,y_train)
y_pred=clf.predict(X_test)
#Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics
# Model Accuracy, how often is the classifier correct?
feature_imp = pd.Series(clf.feature_importances_,index=feature_names).sort_values(ascending=False)
print (feature_imp)
#print ("Accuracy for training set data only:",metrics.accuracy_score(y_test, y_pred))
# On new unknown data
#Fill NA Values with Mean values from training data
new_transients = pd.read_csv("Data_Output/merge_features/all_features.csv", delimiter =",")
new_transients['First Mag'].fillna((firstmag_mean),inplace = True)
new_transients['Galactic Latitude'].fillna((GL_mean),inplace = True)
new_transients['SGSCORE'].fillna((SGSCORE_mean),inplace = True)
new_transients['Color'].fillna((color_mean),inplace = True)
new_transients.to_csv('Data_Output/merge_features/all_features_fillNA.csv', sep=',',index=False)
#make new prediction
prediction = []
for i in range(0,len(new_transients)):
prediction.append(clf.predict([[new_transients['First Mag'][i],new_transients['Galactic Latitude'][i],new_transients['SGSCORE'][i],new_transients['Color'][i]]])[0])
d = {'ZTF Name': new_transients['ZTF Name'], 'RFC Prediction': prediction}
predictions = pd.DataFrame(data=d)
print (predictions)
predictions.to_csv('Data_Output/predictions.csv', sep=',',index=False) | PypiClean |
/LiveSync-0.2.2-py3-none-any.whl/livesync/folder.py | import asyncio
import subprocess
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional
import pathspec
import watchfiles
KWONLY_SLOTS = {'kw_only': True, 'slots': True} if sys.version_info >= (3, 10) else {}
def run_subprocess(command: str, *, quiet: bool = False) -> None:
result = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True)
if not quiet:
print(result.stdout.decode())
@dataclass(**KWONLY_SLOTS)
class Target:
host: str
port: int
root: Path
def make_target_root_directory(self) -> None:
print(f'make target root directory {self.root}')
run_subprocess(f'ssh {self.host} -p {self.port} "mkdir -p {self.root}"')
class Folder:
def __init__(self, local_dir: Path, target: Target) -> None:
self.local_path = local_dir.resolve() # one should avoid `absolute` if Python < 3.11
self.target = target
# from https://stackoverflow.com/a/22090594/3419103
match_pattern = pathspec.patterns.gitwildmatch.GitWildMatchPattern
self._ignore_spec = pathspec.PathSpec.from_lines(match_pattern, self.get_excludes())
self._stop_watching = asyncio.Event()
@property
def target_path(self) -> Path:
return self.target.root / self.local_path.stem
@property
def ssh_path(self) -> str:
return f'{self.target.host}:{self.target_path}'
def get_excludes(self) -> List[str]:
return ['.git/', '__pycache__/', '.DS_Store', '*.tmp', '.env'] + \
self._parse_ignore_file(self.local_path / '.syncignore') + \
self._parse_ignore_file(self.local_path / '.gitignore')
@staticmethod
def _parse_ignore_file(path: Path) -> List[str]:
if not path.is_file():
return []
with path.open() as f:
return [line.strip() for line in f.readlines() if not line.startswith('#')]
def get_summary(self) -> str:
summary = f'{self.local_path} --> {self.ssh_path}\n'
if not (self.local_path / '.git').exists():
return summary
try:
cmd = ['git', 'log', '--pretty=format:[%h]\n', '-n', '1']
summary += subprocess.check_output(cmd, cwd=self.local_path).decode()
cmd = ['git', 'status', '--short', '--branch']
summary += subprocess.check_output(cmd, cwd=self.local_path).decode().strip() + '\n'
except Exception:
pass # maybe git is not installed
return summary
async def watch(self, on_change_command: Optional[str]) -> None:
try:
async for changes in watchfiles.awatch(self.local_path, stop_event=self._stop_watching,
watch_filter=lambda _, filepath: not self._ignore_spec.match_file(filepath)):
for change, filepath in changes:
print('?+U-'[change], filepath)
self.sync(on_change_command)
except RuntimeError as e:
if 'Already borrowed' not in str(e):
raise
def stop_watching(self) -> None:
self._stop_watching.set()
def sync(self, post_sync_command: Optional[str] = None) -> None:
args = '--prune-empty-dirs --delete -avz --checksum --no-t'
# args += ' --mkdirs' # INFO: this option is not available in rsync < 3.2.3
args += ''.join(f' --exclude="{e}"' for e in self.get_excludes())
args += f' -e "ssh -p {self.target.port}"'
run_subprocess(f'rsync {args} {self.local_path}/ {self.ssh_path}/', quiet=True)
if post_sync_command:
run_subprocess(f'ssh {self.target.host} -p {self.target.port} "cd {self.target_path}; {post_sync_command}"') | PypiClean |
/NeuroTS-3.4.0-py3-none-any.whl/neurots/preprocess/utils.py |
# Copyright (C) 2022 Blue Brain Project, EPFL
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from collections import defaultdict
from copy import deepcopy
from itertools import chain
_REGISTERED_FUNCTIONS = {
"preprocessors": defaultdict(set),
"validators": defaultdict(set),
"global_preprocessors": set(),
"global_validators": set(),
}
def register_global_preprocessor():
"""Register a global preprocess function."""
def inner(func):
_REGISTERED_FUNCTIONS["global_preprocessors"].add(func)
return func
return inner
def register_preprocessor(*growth_methods):
"""Register a preprocess function."""
def inner(func):
for i in growth_methods:
_REGISTERED_FUNCTIONS["preprocessors"][i].add(func)
return func
return inner
def register_global_validator():
"""Register a global validation function."""
def inner(func):
_REGISTERED_FUNCTIONS["global_validators"].add(func)
return func
return inner
def register_validator(*growth_methods):
"""Register a validation function."""
def inner(func):
for i in growth_methods:
_REGISTERED_FUNCTIONS["validators"][i].add(func)
return func
return inner
def preprocess_inputs(params, distrs):
"""Validate and preprocess all inputs."""
params = deepcopy(params)
distrs = deepcopy(distrs)
for preprocess_func in chain(
_REGISTERED_FUNCTIONS["global_validators"],
_REGISTERED_FUNCTIONS["global_preprocessors"],
):
preprocess_func(params, distrs)
for grow_type in params["grow_types"]:
growth_method = params[grow_type]["growth_method"]
for preprocess_func in chain(
_REGISTERED_FUNCTIONS["validators"][growth_method],
_REGISTERED_FUNCTIONS["preprocessors"][growth_method],
):
preprocess_func(params[grow_type], distrs[grow_type])
return params, distrs | PypiClean |
/Md-Notes-api-1.0.0.tar.gz/Md-Notes-api-1.0.0/README.md | # Getting Started with MdNotes
## Getting Started
### Introduction
API for Markdown Notes app.
### Install the Package
The package is compatible with Python 2 (>= 2.7.9) and Python 3 (>= 3.4).
Install the package from PyPi using the following pip command:
```python
pip install Md-Notes-api==1.0.0
```
You can also view the package at:
https://pypi.python.org/pypi/Md-Notes-api
### Initialize the API Client
The following parameters are configurable for the API Client:
| Parameter | Type | Description |
| --- | --- | --- |
| `o_auth_client_id` | `string` | OAuth 2 Client ID |
| `o_auth_redirect_uri` | `string` | OAuth 2 Redirection endpoint or Callback Uri |
| `environment` | Environment | The API environment. <br> **Default: `Environment.PRODUCTION`** |
| `timeout` | `float` | The value to use for connection timeout. <br> **Default: 60** |
| `max_retries` | `int` | The number of times to retry an endpoint call if it fails. <br> **Default: 3** |
| `backoff_factor` | `float` | A backoff factor to apply between attempts after the second try. <br> **Default: 0** |
The API client can be initialized as follows:
```python
from mdnotes.mdnotes_client import MdnotesClient
from mdnotes.configuration import Environment
client = MdnotesClient(
o_auth_client_id='OAuthClientId',
o_auth_redirect_uri='OAuthRedirectUri',
    environment=Environment.PRODUCTION,
)
```
You must now authorize the client.
### Authorization
Your application must obtain user authorization before it can execute an endpoint call. The SDK uses *OAuth 2.0 Implicit Grant* to obtain a user's consent to perform an API request on the user's behalf.
This process requires client-side JavaScript code on the redirect URI page to receive the *access token* after the consent step is completed.
#### 1- Obtain user consent
To obtain the user's consent, you must redirect the user to the authorization page. The `get_authorization_url()` method creates the URL to the authorization page.
```python
auth_url = client.auth.get_authorization_url()
```
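For example, in a web application you would typically redirect the user's browser to this URL. The sketch below assumes a Flask app purely for illustration; the route and framework are not part of this SDK:

```python
from flask import Flask, redirect

app = Flask(__name__)

@app.route('/login')
def login():
    # Send the user to the MdNotes authorization page
    return redirect(client.auth.get_authorization_url())
```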
#### 2- Handle the OAuth server response
Once the user responds to the consent request, the OAuth 2.0 server responds to your application's access request by redirecting the user to the redirect URI specified in `Configuration`.
The redirect URI will receive the *access token* as the `token` argument in the URL fragment.
```
https://example.com/oauth/callback#token=XXXXXXXXXXXXXXXXXXXXXXXXX
```
The access token must be extracted by the client-side JavaScript code. The access token can be used to authorize any further endpoint calls by the JavaScript code.
## Client Class Documentation
### MdNotes Client
The gateway for the SDK. This class acts as a factory for the Controllers and also holds the configuration of the SDK.
### Controllers
| Name | Description |
| --- | --- |
| service | Gets ServiceController |
| user | Gets UserController |
## API Reference
### List of APIs
* [Service](#service)
* [User](#user)
### Service
#### Overview
##### Get instance
An instance of the `ServiceController` class can be accessed from the API Client.
```
service_controller = client.service
```
#### Get Status
```python
def get_status(self)
```
##### Response Type
[`ServiceStatus`](#service-status)
##### Example Usage
```python
result = service_controller.get_status()
```
### User
#### Overview
##### Get instance
An instance of the `UserController` class can be accessed from the API Client.
```
user_controller = client.user
```
#### Get User
```python
def get_user(self)
```
##### Response Type
[`User`](#user-1)
##### Example Usage
```python
result = user_controller.get_user()
```
## Model Reference
### Structures
* [Note](#note)
* [User](#user-1)
* [Service Status](#service-status)
* [O Auth Token](#o-auth-token)
#### Note
##### Class Name
`Note`
##### Fields
| Name | Type | Tags | Description |
| --- | --- | --- | --- |
| `id` | `long|int` | Required | - |
| `title` | `string` | Required | - |
| `body` | `string` | Required | - |
| `user_id` | `long|int` | Required | - |
| `created_at` | `string` | Required | - |
| `updated_at` | `string` | Required | - |
##### Example (as JSON)
```json
{
"id": 112,
"title": "title4",
"body": "body6",
"user_id": 208,
"created_at": "created_at2",
"updated_at": "updated_at4"
}
```
#### User
##### Class Name
`User`
##### Fields
| Name | Type | Tags | Description |
| --- | --- | --- | --- |
| `id` | `int` | Required | - |
| `name` | `string` | Required | - |
| `email` | `string` | Required | - |
| `created_at` | `string` | Required | - |
| `updated_at` | `string` | Required | - |
##### Example (as JSON)
```json
{
"id": 112,
"name": "name0",
"email": "email6",
"created_at": "created_at2",
"updated_at": "updated_at4"
}
```
#### Service Status
##### Class Name
`ServiceStatus`
##### Fields
| Name | Type | Tags | Description |
| --- | --- | --- | --- |
| `app` | `string` | Required | - |
| `moto` | `string` | Required | - |
| `notes` | `int` | Required | - |
| `users` | `int` | Required | - |
| `time` | `string` | Required | - |
| `os` | `string` | Required | - |
| `php_version` | `string` | Required | - |
| `status` | `string` | Required | - |
##### Example (as JSON)
```json
{
"app": "app2",
"moto": "moto8",
"notes": 134,
"users": 202,
"time": "time0",
"os": "os8",
"php_version": "php_version4",
"status": "status8"
}
```
#### O Auth Token
OAuth 2 Authorization endpoint response
##### Class Name
`OAuthToken`
##### Fields
| Name | Type | Tags | Description |
| --- | --- | --- | --- |
| `access_token` | `string` | Required | Access token |
| `token_type` | `string` | Required | Type of access token |
| `expires_in` | `long|int` | Optional | Time in seconds before the access token expires |
| `scope` | `string` | Optional | List of scopes granted<br>This is a space-delimited list of strings. |
| `expiry` | `long|int` | Optional | Time of token expiry as unix timestamp (UTC) |
##### Example (as JSON)
```json
{
"access_token": "access_token8",
"token_type": "token_type2",
"expires_in": null,
"scope": null,
"expiry": null
}
```
### Enumerations
* [O Auth Provider Error](#o-auth-provider-error)
#### O Auth Provider Error
OAuth 2 Authorization error codes
##### Class Name
`OAuthProviderErrorEnum`
##### Fields
| Name | Description |
| --- | --- |
| `INVALID_REQUEST` | The request is missing a required parameter, includes an unsupported parameter value (other than grant type), repeats a parameter, includes multiple credentials, utilizes more than one mechanism for authenticating the client, or is otherwise malformed. |
| `INVALID_CLIENT` | Client authentication failed (e.g., unknown client, no client authentication included, or unsupported authentication method). |
| `INVALID_GRANT` | The provided authorization grant (e.g., authorization code, resource owner credentials) or refresh token is invalid, expired, revoked, does not match the redirection URI used in the authorization request, or was issued to another client. |
| `UNAUTHORIZED_CLIENT` | The authenticated client is not authorized to use this authorization grant type. |
| `UNSUPPORTED_GRANT_TYPE` | The authorization grant type is not supported by the authorization server. |
| `INVALID_SCOPE` | The requested scope is invalid, unknown, malformed, or exceeds the scope granted by the resource owner. |
### Exceptions
* [O Auth Provider](#o-auth-provider)
#### O Auth Provider
OAuth 2 Authorization endpoint exception
##### Class Name
`OAuthProviderException`
##### Fields
| Name | Type | Tags | Description |
| --- | --- | --- | --- |
| `error` | [`OAuthProviderErrorEnum`](#o-auth-provider-error) | Required | Error code |
| `error_description` | `string` | Optional | Human-readable text providing additional information on error.<br>Used to assist the client developer in understanding the error that occurred. |
| `error_uri` | `string` | Optional | A URI identifying a human-readable web page with information about the error, used to provide the client developer with additional information about the error |
##### Example (as JSON)
```json
{
"error": "invalid_request",
"error_description": null,
"error_uri": null
}
```
## Utility Classes Documentation
### ApiHelper
A utility class for processing API Calls. Also contains classes for supporting standard datetime formats.
#### Methods
| Name | Description |
| --- | --- |
| json_deserialize | Deserializes a JSON string to a Python dictionary. |
#### Classes
| Name | Description |
| --- | --- |
| HttpDateTime | A wrapper for datetime to support HTTP date format. |
| UnixDateTime | A wrapper for datetime to support Unix date format. |
| RFC3339DateTime | A wrapper for datetime to support RFC3339 format. |
## Common Code Documentation
### HttpResponse
Http response received.
#### Parameters
| Name | Type | Description |
| --- | --- | --- |
| status_code | int | The status code returned by the server. |
| reason_phrase | str | The reason phrase returned by the server. |
| headers | dict | Response headers. |
| text | str | Response body. |
| request | HttpRequest | The request that resulted in this response. |
### HttpRequest
Represents a single Http Request.
#### Parameters
| Name | Type | Tag | Description |
| --- | --- | --- | --- |
| http_method | HttpMethodEnum | | The HTTP method of the request. |
| query_url | str | | The endpoint URL for the API request. |
| headers | dict | optional | Request headers. |
| query_parameters | dict | optional | Query parameters to add in the URL. |
| parameters | dict | str | optional | Request body, either as a serialized string or else a list of parameters to form encode. |
| files | dict | optional | Files to be sent with the request. |
| PypiClean |
/LFT-0.1.1-py3-none-any.whl/lft/app/node.py | from typing import IO, Dict, Type, OrderedDict
from lft.app.data import DefaultDataFactory
from lft.app.epoch import RotateEpoch
from lft.app.vote import DefaultVoteFactory
from lft.app.network import Network
from lft.app.logger import Logger
from lft.consensus.messages.data import Data
from lft.event import EventSystem, EventMediator
from lft.event.mediators import DelayedEventMediator
from lft.consensus.consensus import Consensus
from lft.consensus.events import RoundStartEvent, RoundEndEvent, InitializeEvent
__all__ = ("Node", )
class Node:
def __init__(self, node_id: bytes):
self.node_id = node_id
self.logger = Logger(node_id).logger
self.event_system = EventSystem(self.logger)
self.event_system.set_mediator(DelayedEventMediator)
self._nodes = None
self._network = Network(self.event_system)
self._consensus = Consensus(
self.event_system,
self.node_id,
DefaultDataFactory(self.node_id),
DefaultVoteFactory(self.node_id)
)
self._epoch_num = -1
self._round_num = -1
# For store
self.commit_datums: OrderedDict[int, Data] = OrderedDict()
self.event_system.simulator.register_handler(InitializeEvent, self._on_init_event)
self.event_system.simulator.register_handler(RoundEndEvent, self._on_round_end_event)
async def _on_init_event(self, init_event: InitializeEvent):
self._nodes = init_event.epoch_pool[-1].voters
async def _on_round_end_event(self, round_end_event: RoundEndEvent):
if round_end_event.is_success and round_end_event.commit_id:
data = self._consensus._data_pool.get_data(round_end_event.commit_id)
self.commit_datums[data.number] = data
if (self._epoch_num, self._round_num) > (round_end_event.epoch_num, round_end_event.round_num):
return
self._epoch_num = round_end_event.epoch_num
self._round_num = round_end_event.round_num + 1
await self._start_new_round()
async def _start_new_round(self):
round_start_event = RoundStartEvent(
epoch=RotateEpoch(1, self._nodes),
round_num=self._round_num
)
round_start_event.deterministic = False
mediator = self.event_system.get_mediator(DelayedEventMediator)
mediator.execute(0.5, round_start_event)
def __del__(self):
self.close()
def close(self):
if self._network:
self._network.close()
self._network = None
if self._consensus:
self._consensus.close()
self._consensus = None
if self.event_system:
self.event_system.close()
self.event_system = None
def start(self, blocking=True):
self.event_system.start(blocking)
def start_record(self, record_io: IO, mediator_ios: Dict[Type[EventMediator], IO]=None, blocking=True):
self.event_system.start_record(record_io, mediator_ios, blocking)
def start_replay(self, record_io: IO, mediator_ios: Dict[Type[EventMediator], IO]=None, blocking=True):
self.event_system.start_replay(record_io, mediator_ios, blocking)
def register_peer(self, peer: 'Node'):
self._network.add_peer(peer._network)
def unregister_peer(self, peer: 'Node'):
self._network.remove_peer(peer._network) | PypiClean |
/FuXi-1.4.production.tar.gz/FuXi-1.4.production/lib/DLP/CompletionReasoning.py | __author__ = 'chimezieogbuji'
import sys
from pprint import pprint
from FuXi.Syntax.InfixOWL import *
from FuXi.DLP import SkolemizeExistentialClasses, \
SKOLEMIZED_CLASS_NS, \
LloydToporTransformation, \
makeRule
from FuXi.Horn.HornRules import HornFromN3
from FuXi.Rete.RuleStore import SetupRuleStore
from FuXi.SPARQL.BackwardChainingStore import TopDownSPARQLEntailingStore
from rdflib.Graph import Graph
from cStringIO import StringIO
from rdflib import plugin, RDF, RDFS, OWL, URIRef, Literal, Variable, BNode, Namespace
LIST_NS = Namespace('http://www.w3.org/2000/10/swap/list#')
KOR_NS = Namespace('http://korrekt.org/')
EX_NS = Namespace('http://example.com/')
EX_CL = ClassNamespaceFactory(EX_NS)
derivedPredicates = [
LIST_NS['in'],
KOR_NS.subPropertyOf,
RDFS.subClassOf,
OWL.onProperty,
OWL.someValuesFrom
]
hybridPredicates = [
RDFS.subClassOf,
OWL.onProperty,
OWL.someValuesFrom
]
CONDITIONAL_THING_RULE=\
"""
@prefix kor: <http://korrekt.org/>.
@prefix owl: <http://www.w3.org/2002/07/owl#>.
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>.
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>.
@prefix list: <http://www.w3.org/2000/10/swap/list#>.
#Rule 4 (needs to be added conditionally - only if owl:Thing appears in the ontology)
{ ?C rdfs:subClassOf ?C } => { ?C rdfs:subClassOf owl:Thing }."""
RULES=\
"""
@prefix kor: <http://korrekt.org/>.
@prefix owl: <http://www.w3.org/2002/07/owl#>.
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>.
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>.
@prefix list: <http://www.w3.org/2000/10/swap/list#>.
#ELH completion rules in N3 / RIF / Datalog
{?L rdf:first ?I} => {?I list:in ?L} .
{?L rdf:rest ?R. ?I list:in ?R} => {?I list:in ?L} .
#CTO: Sufficient to assert ?R kor:subPropertyOf ?R for all properties ?R in ontology?
{ ?P1 rdfs:subPropertyOf ?P2 } => { ?P1 kor:subPropertyOf ?P2 } .
#kor:subPropertyOf a owl:TransitiveProperty .
{ ?P1 kor:subPropertyOf ?P2 . ?P2 kor:subPropertyOf ?P3 } => { ?P1 kor:subPropertyOf ?P3 } .
#Rule 1
#rdfs:subClassOf a owl:TransitiveProperty
{ ?C1 rdfs:subClassOf ?C2 . ?C2 rdfs:subClassOf ?C3 } => { ?C1 rdfs:subClassOf ?C3 } .
#Rule 2 (CTO: Different from LL's formulation?)
{ ?C rdfs:subClassOf ?CLASS .
?CLASS owl:intersectionOf ?L .
?D list:in ?L } => { ?C rdfs:subClassOf ?D } .
#Rule 3
{ ?C rdfs:subClassOf ?RESTRICTION .
?RESTRICTION owl:onProperty ?R ;
owl:someValuesFrom ?D } => { ?D rdfs:subClassOf ?D } .
#Rule 5
{ ?C rdfs:subClassOf ?D1, ?D2 .
?D1 list:in ?L .
?D2 list:in ?L .
?E owl:intersectionOf ?L } => { ?C rdfs:subClassOf ?E } .
#Rule 6
{ ?C rdfs:subClassOf ?D .
?E owl:onProperty ?S ;
owl:someValuesFrom ?D
} => { [ a owl:Restriction;
owl:onProperty ?S ;
owl:someValuesFrom ?C ] rdfs:subClassOf ?E } .
#Rule 7
{ ?D rdfs:subClassOf ?RESTRICTION1 .
?RESTRICTION1 owl:onProperty ?R ;
owl:someValuesFrom ?C .
?RESTRICTION2 owl:onProperty ?S ;
owl:someValuesFrom ?C .
?RESTRICTION2 rdfs:subClassOf ?E .
?R kor:subPropertyOf ?S } => { ?D rdfs:subClassOf ?E } .
#Rule 8
{ ?D rdfs:subClassOf ?RESTRICTION1 .
?RESTRICTION1 owl:onProperty ?R ;
owl:someValuesFrom ?C .
?RESTRICTION2 owl:onProperty ?S ;
owl:someValuesFrom ?C .
?RESTRICTION2 rdfs:subClassOf ?E .
?R kor:subPropertyOf ?T .
?T kor:subPropertyOf ?S .
?T a owl:TransitiveProperty } => {
[ a owl:Restriction;
owl:onProperty ?T ;
owl:someValuesFrom ?D ] rdfs:subClassOf ?E } .
"""
LEFT_SUBSUMPTION_OPERAND = 0
RIGHT_SUBSUMPTION_OPERAND = 1
BOTH_SUBSUMPTION_OPERAND = 2
NEITHER_SUBSUMPTION_OPERAND = 3
def WhichSubsumptionOperand(term,owlGraph):
topDownStore=TopDownSPARQLEntailingStore(
owlGraph.store,
owlGraph,
idb=HornFromN3(StringIO(SUBSUMPTION_SEMANTICS)),
DEBUG=False,
derivedPredicates = [OWL_NS.sameAs],
hybridPredicates = [OWL_NS.sameAs])
targetGraph = Graph(topDownStore)
appearsLeft = targetGraph.query(
"ASK { <%s> rdfs:subClassOf [] } ",
initNs={u'rdfs':RDFS.RDFSNS})
appearsRight = targetGraph.query(
"ASK { [] rdfs:subClassOf <%s> } ",
initNs={u'rdfs':RDFS.RDFSNS})
if appearsLeft and appearsRight:
return BOTH_SUBSUMPTION_OPERAND
elif appearsLeft:
return LEFT_SUBSUMPTION_OPERAND
else:
return RIGHT_SUBSUMPTION_OPERAND
def StructuralTransformation(owlGraph,newOwlGraph):
"""
Entry point for the transformation of the given ontology
>>> EX = Namespace('http://example.com/')
>>> EX_CL = ClassNamespaceFactory(EX)
>>> graph = Graph()
>>> graph.bind('ex',EX,True)
>>> Individual.factoryGraph = graph
>>> kneeJoint = EX_CL.KneeJoint
>>> joint = EX_CL.Joint
>>> knee = EX_CL.Knee
>>> isPartOf = Property(EX.isPartOf)
>>> structure = EX_CL.Structure
>>> leg = EX_CL.Leg
>>> hasLocation = Property(EX.hasLocation)
>>> kneeJoint.equivalentClass = [joint & (isPartOf|some|knee)]
>>> legStructure = EX_CL.LegStructure
>>> legStructure.equivalentClass = [structure & (isPartOf|some|leg)]
>>> structure += leg
>>> locatedInLeg = hasLocation|some|leg
>>> locatedInLeg += knee
    >>> newGraph = Graph()
    >>> newGraph,conceptMap = StructuralTransformation(graph,newGraph)
>>> revDict = dict([(v,k) for k,v in conceptMap.items()])
>>> newGraph.bind('ex',EX,True)
>>> Individual.factoryGraph = newGraph
>>> for c in AllClasses(newGraph):
... if c.identifier in revDict: print "## New concept for %s ##"%revDict[c.identifier]
... print c.__repr__(True)
... print "################################"
"""
FreshConcept = {}
newOwlGraph.bind('skolem',SKOLEMIZED_CLASS_NS,True)
for cls in AllClasses(owlGraph):
ProcessConcept(cls,owlGraph,FreshConcept,newOwlGraph)
return newOwlGraph, FreshConcept
def ProcessConcept(klass,owlGraph,FreshConcept,newOwlGraph):
"""
This method implements the pre-processing portion of the completion-based procedure
and recursively transforms the input ontology one concept at a time
"""
iD = klass.identifier
#maps the identifier to skolem:bnodeLabel if
#the identifier is a BNode or to skolem:newBNodeLabel
#if its a URI
FreshConcept[iD] = SkolemizeExistentialClasses(
BNode() if isinstance(iD,URIRef) else iD
)
#A fresh atomic concept (A_c)
newCls = Class(FreshConcept[iD],graph=newOwlGraph)
cls = CastClass(klass,owlGraph)
#determine if the concept is the left, right (or both)
#operand of a subsumption axiom in the ontology
location = WhichSubsumptionOperand(iD,owlGraph)
print repr(cls)
if isinstance(iD,URIRef):
#An atomic concept?
if location in [LEFT_SUBSUMPTION_OPERAND,BOTH_SUBSUMPTION_OPERAND]:
print "Original (atomic) concept appears in the left HS of a subsumption axiom"
#If class is left operand of subsumption operator,
#assert (in new OWL graph) that A_c subsumes the concept
_cls = Class(cls.identifier,graph=newOwlGraph)
newCls += _cls
print "%s subsumes %s"%(newCls,_cls)
if location in [RIGHT_SUBSUMPTION_OPERAND,BOTH_SUBSUMPTION_OPERAND]:
print "Original (atomic) concept appears in the right HS of a subsumption axiom"
#If class is right operand of subsumption operator,
#assert that it subsumes A_c
_cls = Class(cls.identifier,graph=newOwlGraph)
_cls += newCls
print "%s subsumes %s"%(_cls,newCls)
elif isinstance(cls,Restriction):
if location != NEITHER_SUBSUMPTION_OPERAND:
#appears in at least one subsumption operator
#An existential role restriction
print "Original (role restriction) appears in a subsumption axiom"
role = Property(cls.onProperty,graph=newOwlGraph)
fillerCls = ProcessConcept(
Class(cls.restrictionRange),
owlGraph,
FreshConcept,
newOwlGraph)
#leftCls is (role SOME fillerCls)
leftCls = role|some|fillerCls
print "let leftCls be %s"%leftCls
if location in [LEFT_SUBSUMPTION_OPERAND,BOTH_SUBSUMPTION_OPERAND]:
#if appears as the left operand, we say A_c subsumes
#leftCls
newCls += leftCls
print "%s subsumes leftCls"%newCls
if location in [RIGHT_SUBSUMPTION_OPERAND,BOTH_SUBSUMPTION_OPERAND]:
#if appears as right operand, we say left Cls subsumes A_c
leftCls += newCls
print "leftCls subsumes %s"%newCls
else:
assert isinstance(cls,BooleanClass),"Not ELH ontology: %r"%cls
assert cls._operator == OWL_NS.intersectionOf,"Not ELH ontology"
print "Original conjunction (or boolean operator wlog ) appears in a subsumption axiom"
#A boolean conjunction
if location != NEITHER_SUBSUMPTION_OPERAND:
members = [ProcessConcept(Class(c),
owlGraph,
FreshConcept,
newOwlGraph) for c in cls]
newBoolean = BooleanClass(BNode(),members=members,graph=newOwlGraph)
#create a boolean conjunction of the fresh concepts corresponding
#to processing each member of the existing conjunction
if location in [LEFT_SUBSUMPTION_OPERAND,BOTH_SUBSUMPTION_OPERAND]:
#if appears as the left operand, we say the new conjunction
#is subsumed by A_c
newCls += newBoolean
print "%s subsumes %s"%(newCls,newBoolean)
if location in [RIGHT_SUBSUMPTION_OPERAND,BOTH_SUBSUMPTION_OPERAND]:
#if appears as the right operand, we say A_c is subsumed by
#the new conjunction
newBoolean += newCls
print "%s subsumes %s"%(newBoolean,newCls)
return newCls
def createTestOntGraph():
graph = Graph()
graph.bind('ex',EX_NS,True)
Individual.factoryGraph = graph
kneeJoint = EX_CL.KneeJoint
joint = EX_CL.Joint
knee = EX_CL.Knee
isPartOf = Property(EX_NS.isPartOf)
graph.add((isPartOf.identifier,RDF.type,OWL_NS.TransitiveProperty))
structure = EX_CL.Structure
leg = EX_CL.Leg
hasLocation = Property(EX_NS.hasLocation,subPropertyOf=[isPartOf])
# graph.add((hasLocation.identifier,RDFS.subPropertyOf,isPartOf.identifier))
kneeJoint.equivalentClass = [joint & (isPartOf|some|knee)]
legStructure = EX_CL.LegStructure
legStructure.equivalentClass = [structure & (isPartOf|some|leg)]
structure += leg
structure += joint
locatedInLeg = hasLocation|some|leg
locatedInLeg += knee
# print graph.serialize(format='n3')
# newGraph = Graph()
# newGraph.bind('ex',EX_NS,True)
# newGraph,conceptMap = StructuralTransformation(graph,newGraph)
# revDict = dict([(v,k) for k,v in conceptMap.items()])
# Individual.factoryGraph = newGraph
# for oldConceptId ,newConceptId in conceptMap.items():
# if isinstance(oldConceptId,BNode):
# oldConceptRepr = repr(Class(oldConceptId,graph=graph))
# if oldConceptRepr.strip() == 'Some Class':
# oldConceptRepr = manchesterSyntax(
# oldConceptId,
# graph)
# print "%s -> %s"%(
# oldConceptRepr,
# newConceptId
# )
#
# else:
# print "%s -> %s"%(
# oldConceptId,
# newConceptId
# )
#
# for c in AllClasses(newGraph):
# if isinstance(c.identifier,BNode) and c.identifier in conceptMap.values():
# print "## %s ##"%c.identifier
# else:
# print "##" * 10
# print c.__repr__(True)
# print "################################"
return graph
def GetELHConsequenceProcedureRules(tBoxGraph,useThingRule=True):
owlThingAppears = False
if useThingRule and OWL.Thing in tBoxGraph.all_nodes():
owlThingAppears = True
completionRules = HornFromN3(StringIO(RULES))
if owlThingAppears:
completionRules.formulae.extend(
HornFromN3(StringIO(CONDITIONAL_THING_RULE)))
reducedCompletionRules = set()
for rule in completionRules:
for clause in LloydToporTransformation(rule.formula):
rule = makeRule(clause,{})
# print rule
# PrettyPrintRule(rule)
reducedCompletionRules.add(rule)
return reducedCompletionRules
def SetupMetaInterpreter(tBoxGraph,goal,useThingRule=True):
from FuXi.LP.BackwardFixpointProcedure import BackwardFixpointProcedure
from FuXi.Rete.Magic import SetupDDLAndAdornProgram, PrettyPrintRule
from FuXi.Horn.PositiveConditions import BuildUnitermFromTuple, Exists
from FuXi.Rete.TopDown import PrepareSipCollection
from FuXi.Rete.SidewaysInformationPassing import GetOp
owlThingAppears = False
if useThingRule and OWL.Thing in tBoxGraph.all_nodes():
owlThingAppears = True
completionRules = HornFromN3(StringIO(RULES))
if owlThingAppears:
completionRules.formulae.extend(
HornFromN3(StringIO(CONDITIONAL_THING_RULE)))
reducedCompletionRules = set()
for rule in completionRules:
for clause in LloydToporTransformation(rule.formula):
rule = makeRule(clause,{})
# print rule
# PrettyPrintRule(rule)
reducedCompletionRules.add(rule)
network = SetupRuleStore(makeNetwork=True)[-1]
SetupDDLAndAdornProgram(
tBoxGraph,
reducedCompletionRules,
[goal],
derivedPreds=derivedPredicates,
ignoreUnboundDPreds = True,
hybridPreds2Replace=hybridPredicates)
lit = BuildUnitermFromTuple(goal)
op = GetOp(lit)
lit.setOperator(URIRef(op+u'_derived'))
goal = lit.toRDFTuple()
sipCollection=PrepareSipCollection(reducedCompletionRules)
tBoxGraph.templateMap = {}
bfp = BackwardFixpointProcedure(
tBoxGraph,
network,
derivedPredicates,
goal,
sipCollection,
hybridPredicates=hybridPredicates,
debug=True)
bfp.createTopDownReteNetwork(True)
pprint(reducedCompletionRules)
rt=bfp.answers(debug=True)
pprint(rt)
print >>sys.stderr, bfp.metaInterpNetwork
bfp.metaInterpNetwork.reportConflictSet(True,sys.stderr)
for query in bfp.edbQueries:
print >>sys.stderr, "Dispatched query against dataset: ", query.asSPARQL()
pprint(list(bfp.goalSolutions))
def NormalizeSubsumption(owlGraph):
operands = [(clsLHS,clsRHS)
for clsLHS,p,clsRHS in owlGraph.triples((None,
OWL_NS.equivalentClass,
None))]
for clsLHS,clsRHS in operands:
if isinstance(clsLHS,URIRef) and isinstance(clsRHS,URIRef):
owlGraph.add((clsLHS,RDFS.subClassOf,clsRHS))
owlGraph.add((clsRHS,RDFS.subClassOf,clsLHS))
owlGraph.remove((clsLHS,OWL_NS.equivalentClass,clsRHS))
elif isinstance(clsLHS,URIRef) and isinstance(clsRHS,BNode):
owlGraph.add((clsLHS,RDFS.subClassOf,clsRHS))
owlGraph.remove((clsLHS,OWL_NS.equivalentClass,clsRHS))
elif isinstance(clsLHS,BNode) and isinstance(clsRHS,URIRef):
owlGraph.add((clsRHS,RDFS.subClassOf,clsLHS))
owlGraph.remove((clsLHS,OWL_NS.equivalentClass,clsRHS))
if __name__ == '__main__':
goal = (EX_NS.KneeJoint,
RDFS.subClassOf,
Variable('Class'))
ontGraph = createTestOntGraph()
# ontGraph.add((EX_NS.KneeJoint,
# RDFS.subClassOf,
# EX_NS.KneeJoint))
NormalizeSubsumption(ontGraph)
for c in AllClasses(ontGraph):
print c.__repr__(True)
SetupMetaInterpreter(ontGraph,goal)
# test()
# import doctest
# doctest.testmod() | PypiClean |
/EasyModeler-2.2.6.zip/EasyModeler-2.2.6/emlib/emlib.py |
import sys
import os
import copy
import csv
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
from scipy.integrate import odeint
import scipy
import logging
import datetime
from matplotlib.dates import MONDAY, SATURDAY
from matplotlib.dates import MonthLocator , WeekdayLocator ,DateFormatter, YearLocator, num2date, date2num
from sas7bdat import SAS7BDAT
FORMAT = '%(levelname)s -%(lineno)s- %(message)s'
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
emlog = logging.getLogger('EASYMODEL')
def NuN(value, Type=None):
'''
Tries to convert a string into a float, numpy.nan, or specified type. Returns the conversion or the original string if failed.
:param value: The string to convert.
:param Type: Optinal type to convert value to.
:type value: str
:type Type: float,int,str,...
:returns: value
:rtype: float,numpy.nan,str,...
**Advantages**:
- This function is useful in sanitizing text input files.
**Drawbacks**:
- Does not alert calling function if conversion failed.
:Example:
If value(str) is "NaN" or "None", **NuN** will return *numpy.nan*
>>> toTest = "NaN"
>>> sanitized = NuN(toTest)
>>> numpy.isnan(sanitized)
True
**NuN** will try to convert value (str) into a float. If this is unsuccessful **NuN** will return the string.
>>> a = "3.4"
>>> b = "three point four"
>>> a = NuN(a)
>>> b = NuN(b)
>>> type(a)
<type 'float'>
>>> type(b)
<type 'str'>
Empty strings will be returned as *numpy.nan*. This is useful for importing data tables with missing values for cells.
>>> empty = ''
>>> sanitized = NuN(empty)
>>> print sanitized
nan
>>> numpy.isnan(sanitized)
True
Occasionally we may want to import a series of text values as int instead of float.
>>> string = "5"
>>> integer = NuN(string, Type=int)
>>> type(integer)
<type 'int'>
'''
if not value:
return np.nan
if (value.lower() == 'nan') or (value.lower() == '') or (value.lower() == 'none'):
return np.nan
else:
if Type:
try:
Type(value)
return Type(value)
except:
return value
else:
try:
float(value)
return float(value)
except ValueError:
return value
def mmddyyyy2date(datestr):
'''
Converts mm/dd/yyyy str into a :class:`datetime.date` object
:param datestr: The mm/dd/yyyy string to convert.
:type datestr: str
:returns: date
:rtype: datetime.date
Method converts a date string in the form mm/dd/yyyy into a :class:`datetime.date` object.
    Text delimiters are expected in the input string.
:Example:
Converting a datestring to :class:`datetime.date` object:
>>> toTest = "05/20/2013"
    >>> date = mmddyyyy2date(toTest)
>>> print date
2013-05-20
>>> type(date)
<type 'datetime.date'>
'''
date = datetime.date(int(datestr[6:]),int(datestr[:2]),int(datestr[3:5]))
return date
def GFSingle(mean,stdev,model):
'''
Test fitness of single model dT
:param mean: Observation Mean
:type mean: float
:param stdev: Observation STDEV
:type stdev: float
:param model: Simulated value **expected**
:type model: float
    :returns: MSE,WMSE,RANGE,MSER,SS,adjr2
    :rtype: float,float,float,float,float,float
    This is a pattern-matching routine which tests the goodness of fit
    of a single simulated point against observation results.
    .. note:: This function is typically only called by :func:`emlib.GFModel`
'''
obs = mean
obsE = stdev
diff = (abs(obs) - abs(model))
diff2 = diff * diff
#set the stdev to 1 if less for this test
if obsE < 1:
WobsE = 1
else:
WobsE = obsE
WMSE = diff2 / (math.pow(WobsE,2))
MSE = math.pow(((obs - model)),2)
SS = math.pow(((obsE - obs)),2)
adjr2 = MSE/SS
if (model < (obs + obsE)) and (model > (obs - obsE)):
RANGE = 1
MSER = 0
else:
RANGE = 0
if (model < (obs - obsE)):
MSER = math.pow(((obs - obsE) - model),2)
else:
MSER = math.pow(((obs + obsE) - model),2)
#emlog.debug(str((obs - obsE)) + "\t"+str(model)+"\t"+str((obs + obsE))+"\t"+str(RANGE))
return MSE,WMSE,RANGE,MSER,SS, adjr2
def GFModel(model, Observation):
"""Fits Model results to Observation
:param model: Model to test
:param Observation: Historical Observation
:type model: emlib.Model
:type Observation: emlib.Observation
:returns: Fitness object
:rtype: emlib.Fitness
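    :Example:
        A minimal sketch, assuming ``myModel`` is an :class:`emlib.Model` that has
        already been run and ``myObs`` is an :class:`emlib.Observation` (both names
        are illustrative only)::
            >>> fit = emlib.GFModel(myModel, myObs)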
"""
obsT = Observation.T
obsXM = Observation.XM
obsXE = Observation.XE
obsX = 0
obsC = 0
for i in Observation.X:
for k in i:
obsC += 1
obsX += (k * k)
WMSE = 0
MSE = 0
matches = 0
indexobs = 0
RANGE = 0
MSER = 0
O = []
E = []
O_mean = 1 #observed overall mean
E_mean = 1 #expected overall mean
SS_tot = 0 #total sum of squares (expected)
SS_res = 0 #sum of squares of residuals
R2 = 1 #R squared
adjr2 = 0
emlog.debug("-STDEV\tEXP\t+STDEV\tISRANGE?")
for i in obsT:
indexsim = 0
for k in model.computedT:
indexsim+=1 #we are one index ahead
#obs happend at the same exact deltaT of model response
if k == i :
matches+=1
O.append(obsXM[indexobs])
E.append(model.computed[indexsim-1])
a,b,c,d, e, f= GFSingle(obsXM[indexobs],obsXE[indexobs],model.computed[indexsim-1][0])
MSE+=a
WMSE+=b
RANGE+=c
MSER+=d
SS_tot +=e
adjr2 +=f
break
indexobs +=1
WMSE = round(math.sqrt(WMSE),1)
if RANGE > 0: #avoid divide by zero
RANGE = round((100 * float(RANGE)/matches),1)
MSER = round(math.sqrt(MSER),1)
Xtot = obsX/obsC
Xtot= math.sqrt(Xtot)
MSE = math.sqrt(MSE/matches)
RMSD = 1 - (MSE/Xtot)
if RMSD < 0:
RMSD = 0.0
RMSD = round((RMSD * 100),1)
MSE = round(MSE,3)
Xtot = round(Xtot,3)
Xtot= round(math.sqrt(Xtot),3)
emlog.debug("GFMODEL #"+str(matches) +" Xtot"+str(Xtot)+" RMSD%:"+str(RMSD)+" RMSE:"+str(MSE)+" RANGE%"+str(RANGE)+" WMSE:"+str(WMSE))
return Fitness([matches,MSE,WMSE,RANGE,MSER,O,E,RMSD,Xtot])
def EMDraw(GraphOpt,x,y,z=None):
"""
The :func:`matplotlib.plt` wrapper
"""
    fig = plt.figure()
    if GraphOpt.graph == "ts":
        plt.plot(x,y)
    if GraphOpt.graph == 'fp':
        plt.plot(x,y)
    if GraphOpt.graph == '3d':
        ax = Axes3D(fig)
        ax.plot(x,y,z)
    plt.legend(GraphOpt.labels)
    plt.xlabel(GraphOpt.xlabel)
    plt.ylabel(GraphOpt.ylabel)
    plt.show(block=GraphOpt.block)
class dtInput:
"""
Internal structure for handling dTinput
"""
def __init__(self,labels,values):
self.values = values
self.labels = labels
def Val(self,label):
index = 0
for i in self.labels:
if i == label:
return self.values[index]
index += 1
emlog.error('dtInput '+label + ' not found in list')
class GraphOpt:
"""
Advanced graphing options to pass to :func:`matplotlib.plt`
"""
_count = 0
def __init__(self):
self.__class__._count +=1
self.title = None
self.labels = []
self.DPI = None
self.mondays = WeekdayLocator(MONDAY)
self.months = MonthLocator()
self.years = YearLocator()
self.monthsFmt = DateFormatter('%d %b %y')
self.linecolors = []
self.linewidths = []
self.xlim = None
self.ylim = None
self.xlabel = None
self.ylabel = None
self.filename = None
self.graph = None
self.block = False
class Calibration:
"""
A collection of :class:`emlib.Coefficient` for a model.
:param coeffs=: list of :class:`emlib.Coefficient`
:param directory=: directory
:param filename=: filename
:type coeffs=: list,emlib.Coefficient
:type directory=: str
:type filename=: str
"""
_count = 0
def __init__(self, coeffs=None, directory=None ,filename=None):
self.__class__._count +=1
self.initial = []
self.ID = self.__class__._count
emlog.info('New Calibration instance: '+str(self.ID))
self.dir = directory
self.filename = filename
self.C = []
if not directory:
self.dir = ""
if filename:
self.Read(filename)
if coeffs:
self.C = coeffs[:]
def Read(self, filename,directory=None):
"""
Read Coefficients from CSV file
:param directory=: directory
:param filename: filename
:type directory=: str
:type filename: str
:Example:
We have a CSV file called bcfile.csv in the working directory.
        +----------+---------+--------+--------+---------+------------+
        |Label     | Value   | Min    | Max    | ISConst |Desc        |
        +==========+=========+========+========+=========+============+
        |kbg       | 1       | 0.5    | 1      | 0       |growth      |
        +----------+---------+--------+--------+---------+------------+
        |kbm       | 0.001   | 0.0001 | .2     | 0       |mortality   |
        +----------+---------+--------+--------+---------+------------+
        |kdd       | 1       | 0.05   | 3      | 0       |depth mort  |
        +----------+---------+--------+--------+---------+------------+
        |Bcc       | 20      |        |        | 1       |carrying cap|
        +----------+---------+--------+--------+---------+------------+
        |Ktg       | 0.9     | 0.5    | 15     | 0       |            |
        +----------+---------+--------+--------+---------+------------+
        |Sopt      | 15      |        |        | 1       |opt salinity|
        +----------+---------+--------+--------+---------+------------+
        |Ksg       | 8       | 6      | 15     | 0       |            |
        +----------+---------+--------+--------+---------+------------+
        |Ksd       | 2.2     | 0.9    | 7      | 0       |            |
        +----------+---------+--------+--------+---------+------------+
>>> benthosCal = emlib.Calibration()
>>> benthosCal.Read(bcfile.csv)
INFO -243- New Calibration instance: 1
DEBUG -351- C:1 Kbg 1.0
DEBUG -351- C:2 Kbm 0.001
DEBUG -351- C:3 Kdd 1.0
DEBUG -351- C:4 Bcc 20.0
DEBUG -351- C:5 Ktg 0.9
DEBUG -351- C:6 Sopt 15.0
DEBUG -351- C:7 Ksg 8.0
DEBUG -351- C:8 Ksd 2.2
INFO -272- imported C file
"""
self.C = []
if directory:
self.dir = directory
if filename:
self.filename = filename
myspamReader = csv.reader(open(os.path.join(self.dir, self.filename),'rb'), delimiter=',')
firstline = next(myspamReader)
for row in myspamReader:
self.C.append(Coefficient(row[0],val=NuN(row[1]),min=NuN(row[2]),max=NuN(row[3]),isconst=row[4],desc=row[5]))
emlog.info('imported C file')
def Add(self,label,val=None,min=None,max=None,isconst=None,desc=None):
"""
Add a single coefficient to the calibration set
"""
self.C.append(Coefficient(label,val,min,max,isconst,desc))
def Val(self,label):
"""
Returns value of coefficient by label
"""
for i in self.C:
if i.label == label:
return i.var
emlog.error('Coefficient '+label + ' not found in list')
def UpdateC(self,tag,val=None,min=None,max=None,isconst=None,desc=None):
"""
Update an existing Coefficient in the calibration set
"""
for i in self.C:
if i.label == tag:
if val:
i.var = val
if min:
i.min = min
if desc:
i.desc = desc
if max:
i.max = max
if (isconst == "FALSE") or (isconst == 0):
i.isconst = False
else:
i.isconst = True
break
def SetCoeffs(self,Coeffs):
"""
Copy coefficients from array
"""
        self.C = Coeffs[:]
def Write(self,directory=None,filename=None):
"""
Write coefficients to CSV file. Will overwrite contents if file exists.
"""
if directory:
self.dir = directory
if filename:
self.filename = filename
        f = open(os.path.join(self.dir, self.filename), 'wb')
spamWriter = csv.writer(f, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL)
index = 0
series = ["Label","Value","Min","Max","ISConst","Desc"]
spamWriter.writerow(series)
for i in self.C:
spamWriter.writerow(i.Get())
f.close()
emlog.info('Saved C file')
def Print(self):
"""
Prints :class:`emlib.Calibration` structure to STDOUT
>>> benthosCal.Print()
Label Value Min Max ISConst Desc
Kbg 1.0 0.5 1.0 False growth
Kbm 0.001 0.0001 0.2 False mortality
Kdd 1.0 0.05 3.0 False depth mort
Bcc 20.0 nan nan True carrying cap
Ktg 0.9 0.5 15.0 False
Sopt 15.0 nan nan True opt salinity
Ksg 8.0 6.0 15.0 False
Ksd 2.2 0.9 7.0 False
"""
print "Label\tValue\tMin\tMax\tISConst\tDesc"
for i in self.C:
i.Print()
def GetC(self,tag):
"""
Return a single :class:`emlib.Coefficient` structure by label
:param tag: Coefficient label
:type tag: str
:returns: Coefficient
:rtype: emlib.Coefficient
"""
for i in self.C:
if i.label == tag:
return i
def Randomize(self):
"""
Randomizes all coefficients that have **emlib.Coefficient.isconst** set to *False*
.. seealso:: :class:`emlib.Coefficient.Randomize`
"""
for i in self.C:
i.Randomize()
self.GF = []
def Get(self):
"""
        Return a list of the current values of all :class:`emlib.Coefficient` objects.
        :returns: list of coefficient values
:rtype: list
"""
tmp = []
for i in self.C:
tmp.append(i.var)
return tmp
def GetLabels(self):
"""
Return list of all :class:`emlib.Coefficient` labels
:returns: : list of labels
:rtype: list
"""
tmp = []
for i in self.C:
tmp.append(i.label)
return tmp
class Coefficient:
"""
A single parameter coefficient.
:param label: short description
:param val=: coefficient value
:param min=: minimum value
:param max=: maximum value
    :param isconst=: if True the coefficient is held constant and will not be randomized
:param desc=: Long description
:type label: str
:type val=: float
:type min=: float
:type max=: float
:type isconst=: bool
:type desc=: str
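    :Example:
        A minimal sketch of creating a mutable coefficient (the values shown are
        illustrative only)::
            >>> kbg = emlib.Coefficient("Kbg", val=1.0, min=0.5, max=1.0, isconst=False, desc="growth")
            >>> kbg.Randomize()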
"""
_count = 0
def __init__(self,label,val=None,min=None,max=None,isconst=None,desc=None):
self.__class__._count +=1
self.label = label
self.desc = desc
self.var = val
self.min = min
self.max = max
if (isconst):
if (isconst == "False" ) or (isconst == False):
self.isconst = False
else:
self.isconst = bool(isconst)
else:
self.isconst = bool(False)
self.input = 0
self.index = 0
self.ID = self.__class__._count
emlog.debug("C:"+str(self.ID)+" " +self.label+" "+str(self.var)+ " "+ str(self.isconst))
def Randomize(self):
"""
Randomizes coefficient between :class:`emlib.Coefficient.min` and :class:`emlib.Coefficient.max`
using :func:`numpy.random.uniform`
If **Coefficient.isconst** is *True* function returns without randomizing.
.. note:: Why do we need a :mod:`boolean` value :class:`emlib.Coefficient.isconst`? Even though :class:`emlib.Coefficient.min` and :class:`emlib.Coefficient.max` could exist we may want to set this Coefficient to constant dynamically during a calibration algorithm.
"""
if self.isconst:
return
if (self.min and self.max) and (self.min <= self.max):
self.var = np.random.uniform(self.min,self.max)
def SetRange(self,min,max):
"""
Reset our min and max allowable values for Coefficient. This is useful for Monte Carlo algorithms that will tune each coefficient during the calibration process.
:param min=: minimum value
:param max=: maximum value
:type min=: float
:type max=: float
"""
self.min=min
self.max=max
def Print(self):
"""
Prints Coefficient structure to STDOUT.
"""
print self.label,'\t',self.var,'\t',self.min,'\t',self.max,'\t',self.isconst,'\t',self.desc
def Get(self):
"""
Returns entire Coefficient structure as an array list.
:returns: label,var,min,max,isconst,desc
:rtype: list
"""
return [self.label,self.var,self.min,self.max,self.isconst,self.desc]
class Observation:
"""
A series of observations and replicates to validate a model.
    :param value: Column label of the observed variable
    :param dirname: optional directory
    :param filename: filename
    :param fformat: optional file format
    :type value: str
    :type dirname: str
    :type filename: str
    :type fformat: "csv", "sas"
:returns: Observation Object
:rtype: emlib.Observation
This class object is the generic table structure EasyModeler uses
to handle validation data via tables. This class of data differes from
:class:`emlib.TimeSeries` in that replicates of measurements are made at the same time.
This data is used to :func:`emlib.Model.Validate()` a model to observations.
EasyModeler 2 supports comma separated value files *CSV* and *SAS* 7 binary.
:SAS Example:
- Data is stored in a SAS 7 binary file **testsas.sas7bdat** in the working directory. The salinity observations
of this file will be used to validate a model response.
>>> sasob = emlib.Observation("salinity",filename="testsas.sas7bdat",fformat="sas")
DEBUG -609- New OBS for value:salinity COLMS:15 testsas.sas7bdat
DEBUG -610- [u'date', u'station', u'rep', u'TSS', u'CFTSS', u'Cl_a___g_ltr_', u'NH4___mol_l_', u'Nox___mol_l_', u'SiO4___mol_l_', u'Ophos___mol_l_', u'Temp', u'Depth', u'pH', u'DO_', u'DO_mg_l', u'salinity', u'turbidity__ntu_', u'conductivity']
INFO -645- Read file testsas.sas7bdat 44 Observations for value salinity
- The Observation structure stores the each variable, the mean average, and the STDEV for validation purposes.
>>> sasob.Print()
salinity from testsas.sas7bdat
2011-10-21 M: 40.385 E: 0.095
Values: [40.289999999999999, 40.289999999999999, 40.479999999999997, 40.479999999999997]
:CSV Example:
- Comma Separated Value files are imported by using the **fformat="csv"** switch, or by not using the **fformat=** option.
>>> sasob = emlib.Observation("Cl_a___g_ltr_",filename="testcsv.csv")
INFO -666- Read file testsas.sas7bdat 44 Observations for value salinity
DEBUG -648- New OBS for value:Cl_a___g_ltr_ COLMS:5 testcsv.csv
DEBUG -649- ['date', 'station', 'rep', 'TSS', 'CFTSS', 'Cl_a___g_ltr_', 'NH4___mol_l_', 'Nox___mol_l_', 'SiO4___mol_l_', 'Ophos___mol_l_', 'Temp', 'Depth', 'pH', 'DO_', 'DO_mg_l', 'salinity', 'turbidity__ntu_', 'conductivity']
INFO -666- Read file testcsv.csv 44 Observations for value Cl_a___g_ltr_
>>> sasob.Print()
Cl_a___g_ltr_ from testcsv.csv
2011-10-21 M: 4.465 E: 0.429563732175
Values: [4.7999999999999998, 4.9699999999999998, 3.9500000000000002, 4.1399999999999997]
"""
_count = 0
def __init__(self,value,dirname=None,filename=None,fformat=None):
self.__class__._count += 1
self.label = value
self.T = []
self.X = []
self.XM = []
self.XE = []
self.ID = self.__class__._count
self.dir = dirname
self.filename = filename
if not dirname:
self.dir = ""
sasreader = []
if (fformat == 'sas'):
with SAS7BDAT(os.path.join(self.dir, self.filename)) as f:
for row in f:
sasreader.append(row)
firstline = sasreader[0]
emlog.debug(firstline)
emlog.debug("Searching for "+self.label )
col = firstline.index(self.label) #setup the value of interest
for row in sasreader[1:]:
date = row[0]
if date in self.T: # if we already have the same date then insert new obs
if row[col] != '': #only insert if there is a value
self.X[len(self.T)-1].append(row[col])
else: #else we make a new obsT
newlist = []
if row[col] != '':
newlist.append(row[col])
self.T.append(date)
self.X.append(newlist)
else:
myspamReader = csv.reader(open(os.path.join(self.dir, self.filename),'rb'), delimiter=',')
firstline = next(myspamReader)
emlog.debug(firstline)
col = firstline.index(self.label) #setup the value of interest
emlog.debug("New OBS for value:"+str(self.label)+" COLMS:"+str(col)+" "+str(self.dir)+str(self.filename))
for row in myspamReader:
date = datetime.datetime.combine(mmddyyyy2date(row[0]),datetime.time(0,0))
if date in self.T: # if we already have the same date then insert new obs
if row[col] != '': #only insert if there is a value
self.X[len(self.T)-1].append(NuN(row[col]))
else: #else we make a new obsT
newlist = []
if row[col] != '':
newlist.append(NuN(row[col]))
self.T.append(date)
self.X.append(newlist)
for i in self.X:
self.XM.append(np.mean(i)) #mean value table
self.XE.append(np.std(i)) #stdev values
emlog.info( "Read file "+self.dir+self.filename+" "+str(len(self.X))+" Observations for value "+self.label)
def Draw(self, block=True):
"""
Plot Observations
:param block: Blocking or non-blocking
:type bool: bool
Simple matplotlib plotting wrapper
"""
plt.figure()
plt.suptitle(self.filename)
plt.plot(self.T,self.XM, 'ro', color='grey')
plt.errorbar(self.T,self.XM, yerr=self.XE, color='grey',fmt='o', linewidth=1.4)
plt.legend([self.label])
plt.show(block=block)
def Print(self):
index = 0
print self.label, " from ", self.dir + self.filename
for i in self.T:
print i, "M: ",self.XM[index], "E:", self.XE[index]
print "Values:\t\t", self.X[index]
index+=1
class TimeSeries:
"""
A series of data in time.
    :param dirname: optional directory
    :param filename: filename
    :param fformat: optional file format
    :type dirname: str
    :type filename: str
    :type fformat: "csv", "sas"
:returns: TimeSeries Object
:rtype: emlib.TimeSeries
This class object is the generic table structure EasyModeler uses
to handle dtInput data via tables. This class of data differes from
:class:`emlib.Observation` in that measurements are discrete: only one measurement of a variable is
made at a specific time. This data is used to feed a :class:`emlib.Model` with dtInput data. For validating
model responses use :class:`emlib.Observation` .
EasyModeler 2 supports comma separated value files *CSV* and *SAS* 7 binary.
For CSV files the first row includes the header labels and first column is datetime
in the form mm/dd/yyyy. Future planned expansions will increase this functionality.
For SAS files the first column is a SAS datetime object.
:CSV Example:
- You have a table of data in the form of a .CSV file stored as **/mydata/monthlyphysical.csv**.
Some of the cells may contain empty *Null* strings:
+----------+---------+--------+
    |DATE      | SALINITY| TEMP   |
    +==========+=========+========+
    |01/20/2013| 30.2    | 22.5   |
    +----------+---------+--------+
    |02/19/2013| 20.2    | 15.3   |
    +----------+---------+--------+
    |03/20/2013|         | 24.2   |
+----------+---------+--------+
- Creating the TimeSeries object::
>>> myData = TimeSeries(dirname="mydata",filename="monthlyphysical.csv")
DEBUG -202- New INPUT table mydata\monthlyphysical.csv['DATE', 'SALINITY', 'TEMP']
DEBUG -212- Saved 3 rows and 2 columns
DEBUG -214- Converted dates to contiguous np.array
DEBUG -216- Converted input data to contiguous np.array
- EasyModeler separates time and data arrays as a design decision. EasyModeler converts time to :mod:datetime objects. To access the date array use the member **.T** ::
>>> print myData.T
[2013-01-20 2013-02-19 2013-03-20]
:Missing Values:
EasyModeler converts blank *missing* values in data streams to :class:`numpy.nan` objects. This is advantageous for plotting and numerical operations.
Each non-date cell is passed to :func:`emlib.NuN` for conversion to :func:`float` values.
.. seealso:: For more information about how :func:`emlib.NuN` handles empty strings and numerical conversions see its documentation.
:SAS Example:
- File baywater.sas7bdat is a SAS binary file stored in the working directory. In SAS 9.3 a snippet of the table view is:
+----------+---------+--------+
|DATE | SALINITY| TEMP |
+==========+=========+========+
|21OCT2011 | 40.29 | 23.03 |
+----------+---------+--------+
|02NOV2011 | 20.2 | 15.3 |
+----------+---------+--------+
|09NOV2011 | | 24.2|
+----------+---------+--------+
- Creating the TimeSeries object::
>>> myData = TimeSeries(filename="baywater.sas7bdat", fformat="sas")
INFO -748- New TimeSeries instance: 1
DEBUG -778- New INPUT table testsas.sas7bdat[u'date', u'station', u'rep', u'TSS', u'CFTSS', u'Cl_a___g_ltr_', u'NH4___mol_l_', u'Nox___mol_l_', u'SiO4___mol_l_', u'Ophos___mol_l_', u'Temp', u'Depth', u'pH', u'DO_', u'DO_mg_l', u'salinity', u'turbidity__ntu_', u'conductivity']
DEBUG -805- Saved 177 rows and 17 columns
DEBUG -807- Converted dates to contiguous np.array
DEBUG -809- Converted input data to contiguous np.array
"""
_count = 0
def __init__(self,dirname=None,filename=None, fformat="csv"):
self.__class__._count += 1
self.ID = self.__class__._count
emlog.info('New TimeSeries instance: '+str(self.ID))
self.dir = dirname
self.filename = filename
self.fformat = fformat
self.labels = []
if not dirname:
self.dir = ""
if filename:
self._Read()
def _Read(self, filename=None,directory=None, fformat=None):
self.Rows = []
self.labels = []
self.T = []
self.sastmp = []
if directory:
self.dir = directory
if filename:
self.filename = filename
if fformat:
self.fformat = fformat
if self.fformat == "sas":
with SAS7BDAT(os.path.join(self.dir, self.filename)) as f:
for row in f:
self.sastmp.append(row)
self.labels = self.sastmp[0]
emlog.debug("New INPUT table "+str(self.dir)+str(self.filename)+str(self.labels))
col = 0
hastime = 0
for row in self.sastmp[1:]:
myrow = []
if type(row[1]) == datetime.time:
hastime = 1
self.T.append(datetime.datetime.combine(row[0], row[1]))
for i in range(len(self.labels))[2:]:
if row[i] is None:
print "found none"
myrow.append(np.nan)
else:
myrow.append(row[i])
else:
self.T.append(row[0])
for i in range(len(self.labels))[1:]:
if row[i] is None:
print "found none"
myrow.append(np.nan)
else:
myrow.append(row[i])
newrow = []
for i in myrow:
if i == None:
newrow.append(np.nan)
else:
newrow.append(i)
self.Rows.append(newrow)
del self.labels[0]
if hastime:
del self.labels[0]
del self.sastmp
if self.fformat == "csv":
myspamReader = csv.reader(open(os.path.join(self.dir, self.filename),'rb'), delimiter=',')
self.labels = next(myspamReader)
emlog.debug("New INPUT table "+str(self.dir)+str(self.filename)+str(self.labels))
for row in myspamReader:
self.T.append(mmddyyyy2date(row[0]))
myrow = []
for i in range(len(self.labels)):
if i == 0:
continue
myrow.append(NuN(row[i]))
self.Rows.append(myrow)
del self.labels[0]
emlog.debug("Saved "+str(len(self.T))+" rows and "+str(len(self.labels))+" columns")
self.T = np.ascontiguousarray(self.T, dtype=object)
emlog.debug("Converted dates to contiguous np.array")
self.Rows = np.ascontiguousarray(self.Rows, dtype=object)
emlog.debug("Converted input data to contiguous np.array")
def Draw(self, block=True):
"""
Plot TimeSeries
:param block: Blocking or non-blocking
:type block: bool
Simple matplotlib plotting wrapper
"""
plt.figure()
plt.plot(self.T,self.Rows)
plt.legend(self.labels)
plt.suptitle(self.filename)
plt.show(block=block)
def Print(self,column=None):
"""
Prints entire TimeSeries, or column, to **STDOUT**.
"""
if column:
try:
self.labels.index(column)
except ValueError:
emlog.warn(str(column)+" not in table. Try:"+str(self.labels))
return
col = self.labels.index(column)
print "Date\t"+column
for i in range((len(self.T))):
print self.T[i],"\t",self.Rows[i][col]
else:
for i in range((len(self.T))):
print self.T[i],"\t",self.Rows[i]
def GetLabels(self):
"""
Simple procedure to get array of string labels
:returns: list
:rtype: str
:Example:
- Simple print::
>>> print myTable.GetLabels()
['SALINITY', 'TEMP']
"""
return self.labels
def Get(self,columnLabel):
"""
Return a column as array.
:param columnLabel: The column to return
:type columnLabel: str
:returns: list
:rtype: float,np.Nan,str,...
:Example:
- Simple grab::
>>> salinity = myTable.Get("SALINITY")
"""
try:
self.labels.index(columnLabel)
except ValueError:
emlog.warn(str(columnLabel)+" not in table. Try:"+str(self.labels))
return []
col = self.labels.index(columnLabel)
tmp = []
for i in range((len(self.T))):
tmp.append(self.Rows[i][col])
return tmp
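# Usage sketch for Get(): assuming a table like the "monthlyphysical.csv"
# example in the class docstring, a single column can be pulled out and
# plotted against the cached date array:
#
#   myData = TimeSeries(dirname="mydata", filename="monthlyphysical.csv")
#   salinity = myData.Get("SALINITY")
#   plt.plot(myData.T, salinity, 'o')
#   plt.show()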
class Model:
"""
Class method creates a new ODE model structure.
:param ODEFunction: The ODE code function to be integrated.
:param jacobian: Optional jacobian matrix
:param algorithm: Optional integration algorithm, default *Vode*
:param method: Optional algorithm method type, default *bdf*
:param order: Optional integrator order, default *12*
:param nsteps: Optional integrator internal steps, default *3000*
:type ODEFunction: Python function
:type jacobian: jacobian array
:type algorithm: str
:type method: str
:type order: int
:type nsteps: int
:returns: Model object
:rtype: emlib.Model
:Example:
- First declare an ODE_INT function. This will be passed to the :func:`scipy.integrate.odeint` integrator::
def LV_int(initial, dtinput, constants):
x = initial[0]
y = initial[1]
A = 1
B = 1
C = 1
D = 1
x_dot = (A * x) - (B * x *y)
y_dot = (D * x * y) - (C * y)
return [x_dot, y_dot]
.. seealso:: For help creating ODE_INT functions see :mod:`scipy.integrate`
.. warning:: Use logical operators with caution inside the ODE function. Declaring a derivative *_dot* after a conditional will yield unpredictable results.
- Pass the ODE function to :class:`emlib.Model` as::
>>> myModel = emlib.Model(LV_int)
"""
_count = 0
def __init__(self,ODEFunction,jacobian=None,algorithm=None,method=None,order=None,nsteps=None,dt=None):
self.__class__._count += 1
self.ID = self.__class__._count
self.dt = 1
self.myodesolve = scipy.integrate.ode(ODEFunction, jac=jacobian)
emlog.info('New Model('+str(self.ID)+"): "+ODEFunction.__name__)
if jacobian:
emlog.debug('Jacobian loaded')
if method:
self.method = method
else:
self.method = 'bdf'
if algorithm:
self.algorithm = algorithm
else:
self.algorithm = 'vode'
if not method and not algorithm:
emlog.info('No algorithm supplied, assuming vode/bdf O12 Nsteps3000 dt1')
if order:
self.order = order
else:
self.order = 12
if nsteps:
self.nsteps = nsteps
else:
self.nsteps = 3000
if dt:
self.dt = dt
else:
self.dt = 1
self.myodesolve.set_integrator(self.algorithm, method=self.method, order=self.order,nsteps=self.nsteps)
emlog.debug('Integrator:'+self.algorithm+"/"+self.method+" order:"+str(self.order)+" nsteps:"+str(self.nsteps)+" dt:"+str(self.dt))
def Integrate(self,initial,maxdt=None,Calibration=None,TimeSeries=None,start=None,end=None, dt=None):
computed = []
computedT = []
self.myodesolve.set_initial_value(initial,0)
emlog.debug("ODEINT Initials:"+"".join(map(str,initial)))
if dt:
self.dt = dt
if TimeSeries and start:
s = np.where(TimeSeries.T==start)
if len(s[0]) == 0:
emlog.error("Supplied Start " + str(start) + " does not exist, assuming 0")
s = 0
else:
s = s[0][0]
else:
s = 0
if TimeSeries and end:
e = np.where(TimeSeries.T==end)
if len(e[0]) == 0:
e = len(TimeSeries.T) - 1
emlog.error("Supplied End does not exist, assuming "+str(TimeSeries.T[e]))
else:
e = e[0][0]
if TimeSeries and maxdt:
e = maxdt + s
if e > len(TimeSeries.T) - 1:
e = len(TimeSeries.T) - 1
emlog.error("Maxruns > input ending, assuming "+str(TimeSeries.T[e]))
if not TimeSeries:
if maxdt:
e = maxdt * int(1 / self.dt) + s
else:
emlog.error("No maxruns specified, exiting!")
return
if TimeSeries and (start is None) and (end is None):
emlog.debug("Start/end not supplied, using full TimeSeries range")
s = 0
e = len(TimeSeries.T)
emlog.debug("Starting:"+str(TimeSeries.T[s])+" Ending:"+str(len(TimeSeries.T)))
emlog.debug("Passing DtInput:"+str(TimeSeries.GetLabels()))
else:
emlog.debug("Ending in "+str(e)+" runs")
if Calibration:
emlog.debug("Passing Cs:"+str(Calibration.GetLabels()))
tcount = 0
for i in range(s,e,1):
#print s, e, i
if TimeSeries and Calibration:
self.myodesolve.set_f_params(dtInput(TimeSeries.labels,TimeSeries.Rows[i]),Calibration)
elif TimeSeries and not Calibration:
self.myodesolve.set_f_params(dtInput(TimeSeries.labels,TimeSeries.Rows[i]),None)
elif Calibration and not TimeSeries:
self.myodesolve.set_f_params(None,Calibration)
elif not Calibration and not TimeSeries:
self.myodesolve.set_f_params(None,None)
self.myodesolve.integrate(self.myodesolve.t + self.dt)
self.myodesolve.set_initial_value(self.myodesolve.y,self.myodesolve.t)
if ((tcount % 500) == 0):
emlog.debug( "Integration dT:"+str(tcount)+" of "+str(e - s)+" Remaining:"+str(e - s - tcount))
tcount+=1
if TimeSeries:
computedT.append(TimeSeries.T[i])
else:
computedT.append(i+s)
computed.append(self.myodesolve.y)
self.computed = np.ascontiguousarray(computed)
self.computedT = computedT
emlog.debug("Completed Integration, created np.array shape:"+str(self.computed.shape))
return
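# Integration sketch: with no TimeSeries attached, maxdt fixes how many time
# units to integrate (internally maxdt/dt steps); with a TimeSeries, start/end
# select the driving rows. Values shown are placeholders:
#
#   myModel.Integrate([1.0, 1.0], maxdt=365, dt=0.5)
#   myModel.Draw(graph='fp')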
def Draw(self, block=True,graph='ts',order=None):
"""
Plot Computed Series
:param block: Blocking or non-blocking
:type block: bool
Simple matplotlib plotting wrapper
"""
if graph == 'ts':
plt.figure()
plt.suptitle("Computed Integral")
plt.plot(self.computedT,self.computed)
plt.show(block=block)
if graph == 'fp':
plt.figure()
plt.suptitle("Computed Integral")
if order:
plt.plot(self.computed[:,int(order[0])],self.computed[:,int(order[1])])
else:
plt.plot(self.computed[:,0],self.computed[:,1])
plt.show(block=block)
if graph == '3d':
fig = plt.figure()
fig = plt.figure()
ax = Axes3D(fig)
fig.suptitle("Computed Integral")
if order:
ax.plot(self.computed[:,int(order[0])],self.computed[:,int(order[1])],self.computed[:,int(order[2])],label="3D Plot")
else:
ax.plot(self.computed[:,0],self.computed[:,1],self.computed[:,2],label="3D Plot")
plt.show(block=block)
def Validate(self,Observation,graph=False):
"""
Validate model output to observed data
:param Observation: The Observation class
:type Observation: emlib.Observation
:returns: fitness object
:rtype: emlib.Fitness
This function is a wrapper for the functions :func:`emlib.GFModel` and :func:`emlib.GFSingle` .
Model simulation output is tested against historical Observations. A series of Goodness of Fit statistics are returned as an :class:`emlib.Fitness` structure.
:Example:
>>> Model.Integrate(calibration.initial,
Calibration=calibration)
.. note:: Model is assumed to be integrated via :func:`Model.Integrate` and results stored in Model.computed
"""
self.fit = GFModel(self,Observation)
if graph:
plt.figure()
plt.suptitle("Computed Integral")
plt.plot(self.computedT,self.computed)
plt.plot(Observation.T,Observation.XM, 'ro', color='grey')
plt.errorbar(Observation.T,Observation.XM, yerr=Observation.XE, color='grey',fmt='o', linewidth=1.4)
plt.show()
return self.fit
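# Usage sketch for Validate(): assuming myModel has already been run through
# Model.Integrate() and myObs is an emlib.Observation of the same state
# variable, the returned Fitness object can be inspected directly:
#
#   fit = myModel.Validate(myObs, graph=True)
#   fit.Print()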
def Calibrate(self,Calibration,Observation,runs=None,TimeSeries=None,Algorithm=None,start=None,end=None,dt=None):
"""
Wrapper to calibrate model via supplied Monte Carlo algorithm.
:param Calibration: Model Coefficients
:type Calibration: emlib.Calibration
:param Observation: What really happened
:type Observation: emlib.Observation
:param runs: Maximum times to integrate
:type runs: int
:param TimeSeries: Optional dtInput Table
:type TimeSeries: emlib.TimeSeries
:param Algorithm: Calibration Function
:type Algorithm: **func**
:param start: Optional simulation start
:type start: datetime.date,int
:param end: Optional simulation end
:type end: datetime.date,int
:returns: Model Calibration
:rtype: emlib.Calibration
This function will integrate the current model *runs* times using the supplied **Algorithm**. If no algorithm is supplied, :func:`GF_BruteForceMSE` is assumed.
:Example:
>>> bestCalibration = Model.Calibrate(startingCalibration,
Observation, runs=5000)
.. note:: Supplying a large *runs* may hang the terminal while the calibrator executes. Using CTRL+C will break out of the program but all calibration progress will be lost.
"""
if not Algorithm:
emlog.warn("No fitness method provided, assuming GF_BruteForceMSE")
return GF_BruteForceMSE(self,Calibration,Observation,runs,TimeSeries,start,end,dt)
else:
emlog.debug("Applying fitness function:"+str(Algorithm))
return Algorithm(self,Calibration,Observation,runs,TimeSeries,start,end,dt)
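# Calibration sketch: GF_BruteForceMSE (below) is the default search, but any
# function with the same signature can be passed through the Algorithm
# argument, e.g. the RMSD-based search GF_BruteForceRMSD defined further down:
#
#   best = myModel.Calibrate(startingCalibration, myObs, runs=1000,
#                            Algorithm=GF_BruteForceRMSD)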
def GF_BruteForceMSE(Model,Calibration,Observation,maxruns,TimeSeries=None,start=None,end=None,dt=None):
testingC = copy.deepcopy(Calibration)
Model.Integrate(testingC.initial,Calibration=testingC, TimeSeries=TimeSeries, start=start, end=end)
GF = Model.Validate(Observation)
bestMSE = GF.MSE
for i in range(maxruns-1):
testingC.Randomize()
Model.Integrate(testingC.initial,Calibration=testingC, TimeSeries=TimeSeries, start=start, end=end,dt=dt)
GF = Model.Validate(Observation)
if GF.MSE < bestMSE:
print "New Best Calibration"
Calibration = copy.deepcopy(testingC)
bestMSE = GF.MSE
return Calibration
def GF_BruteForceMSERANGE(Model,Calibration,Observation,maxruns,TimeSeries=None,start=None,end=None,dt=None):
testingC = copy.deepcopy(Calibration)
Model.Integrate(testingC.initial,Calibration=testingC, TimeSeries=TimeSeries, start=start, end=end,dt=dt)
GF = Model.Validate(Observation)
bestMSE = GF.MSE
bestRANGE = GF.RANGE
for i in range(maxruns-1):
testingC.Randomize()
Model.Integrate(testingC.initial,Calibration=testingC, TimeSeries=TimeSeries, start=start, end=end)
GF = Model.Validate(Observation)
if (GF.MSE < bestMSE) and (GF.RANGE > bestRANGE) :
emlog.info("New Best Calibration")
Calibration = copy.deepcopy(testingC)
bestMSE = GF.MSE
GF.Print()
return Calibration
def GF_BruteForceRMSD(Model,Calibration,Observation,maxruns,TimeSeries=None,start=None,end=None,dt=None):
testingC = copy.deepcopy(Calibration)
Model.Integrate(testingC.initial,Calibration=testingC, TimeSeries=TimeSeries, start=start, end=end,dt=dt)
GF = Model.Validate(Observation)
bestRMSD = GF.RMSD
orgRMSD = GF.RMSD
for i in range(maxruns-1):
testingC.Randomize()
Model.Integrate(testingC.initial,Calibration=testingC, TimeSeries=TimeSeries, start=start, end=end)
GF = Model.Validate(Observation)
if (GF.RMSD > bestRMSD) :
print ("New Best Calibration:" +str(GF.RMSD) + " prev:" + str(bestRMSD) + " orig:" +str(orgRMSD))
Calibration = copy.deepcopy(testingC)
bestRMSD = GF.RMSD
else:
emlog.info("Int:" +str(i) + " RMSD Current: "+ str(GF.RMSD) + " Best:" + str(bestRMSD) + " Orig:" +str(orgRMSD))
return Calibration
class Fitness:
"""
Goodness of Fit Structure
:param fit: list of fitness measurements
:type fit: list
:Attributes:
* *Fitness.matches* Number of fitness values
* *Fitness.MSE* Mean Square Error
* *Fitness.WMSE* Weighted Mean Square Error
* *Fitness.RANGE* % Inside STDEV
* *Fitness.MSER* Mean Square Error outside STDEV
* *Fitness.O* list of observed means
* *Fitness.E* list of expected values
This is an internal :mod:`emlib` structure for housing Goodness of Fit statistics.
"""
_count = 0
def __init__(self,fit):
self.__class__._count += 1
self.ID = self.__class__._count
self.matches = fit[0]
self.MSE = fit[1]
self.WMSE = fit[2]
self.RANGE = fit[3]
self.MSER = fit[4]
self.O = fit[5]
self.E = fit[6]
self.RMSD = fit[7]
self.Xtot = fit[8]
emlog.debug("New fitness object:"+str(self.ID))
def Print(self):
"""
Print all statistics to STDOUT
"""
print("GFMODEL #"+str(self.matches)+"Xtot:"+str(self.Xtot)+" RMSD:"+str(self.RMSD)+" RMSE:"+str(self.MSE)+" RANGE%"+str(self.RANGE)+" MSER:"+str(self.MSER)+" WMSE:"+str(self.WMSE)) | PypiClean |
/FullContact-AIO-0.0.8.tar.gz/FullContact-AIO-0.0.8/README.md | FullContact.py
==============
[](https://badge.fury.io/py/FullContact-AIO)
[](https://travis-ci.org/fullcontact/fullcontact.py)
A Python interface for the [FullContact API](http://docs.fullcontact.com/).
Installation
------------
```
pip install FullContact-AIO
```
Usage
-----
```python
import asyncio
from fullcontact_aio import FullContact
async def get_person_by_email():
fc = FullContact('xgtbJvVos2xcFMX1JvXaQvx0ZaExhSCT')
#returns a python dictionary
r = await fc.person(email='[email protected]')
# The number of requests left in the 60-second window.
rate_limit_remaining = r['X-Rate-Limit-Remaining']
print(r) # {u'socialProfiles': [...], u'demographics': {...}, ... }
print(rate_limit_remaining)
asyncio.get_event_loop().run_until_complete(get_person_by_email())
```
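Because the client is asyncio based, several lookups can be issued concurrently
with `asyncio.gather`. The sketch below reuses the same `person` lookup shown
above; the API key and email addresses are placeholders, and concurrent calls
are assumed to stay within your rate limit.

```python
import asyncio
from fullcontact_aio import FullContact

async def get_people():
    fc = FullContact('your_api_key_here')
    emails = ['[email protected]', '[email protected]']
    # Issue the lookups concurrently; results come back in the same order.
    results = await asyncio.gather(*(fc.person(email=e) for e in emails))
    for email, r in zip(emails, results):
        print(email, r)

asyncio.get_event_loop().run_until_complete(get_people())
```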
Supported Python Versions
-------------------------
* 3.6
* 3.7
* 3.8
* 3.9
Official Documentation
-------------------------
https://dashboard.fullcontact.com/api-ref | PypiClean |
/NonlinearLeastSquares-2.0.2.tar.gz/NonlinearLeastSquares-2.0.2/ExamplesStructureFromCameraMotion/bundle_adjust_sfm_with_uncalibrated_cameras_translations_only.py |
## bundle_adjust_sfm_with_uncalibrated_cameras_translations_only.py
## This script demonstrates how to use the sparse-bundle-adjustment capabilities
## of the NonlinearLeastSquares module for solving problems that require estimating
## both the scene structure and the camera parameters for the case when the data
## is collected with uncalibrated cameras.
## For any nonlinear least-squares method, you are required to supply starting values
## for the parameters you are estimating. Therefore, it is interesting to study
## at what point an algorithm starts getting trapped in a local minimum as you
## move the starting value farther and farther away from their true optimum values.
## You can perform those kinds of studies with this script by changing the values
## of the variables 'cam_pam_noise_factor' and 'structure_noise_factor'.
## Note that this script should produce results identical to those produced by
## the script
##
## sfm_with_uncalibrated_cameras_translations_only.py
##
## but, of course, much faster because it calls on the bundle-adjustment variant
## of the Levenberg-Marquardt algorithm. The results from the two scripts would
## be identical provided you use exactly the same number of world points, exactly
## the same number of camera positions, etc., in both cases.
## Calling syntax:
##
## bundle_adjust_sfm_with_uncalibrated_cameras_translations_only.py
import NonlinearLeastSquares
import ProjectiveCamera
import numpy
import random
import sys
random.seed('abracadabra')
#cam_pam_noise_factor = 1.0 ## This creates an initial average error in the 6
## camera parameters for each camera that is large
## enough to cause an average error of 60 pixels in
## the projections for each of the measurements.
## Note that the six parameters for a camera are
## (w_x,w_y,w_z,t_x,t_y,t_z). The pixel displacement
## error of 60 pixels is brought down to 12 units by
## LM in a couple of iterations if you start with
## zero structure noise
cam_pam_noise_factor = 0.1 ## creates an initial average error of 4.96 units
## which is brought down to 0.26 units in a couple
## of iterations.
#structure_noise_factor = 500
structure_noise_factor = 0 ## This controls the uncertainty in the initial
## values supplied for the structure variables.
## When set to 0, you can demonstrate how SBA
## can be used for a simultaneous calibration of
## of the camera in all its positions.
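## A convenient way to run the study mentioned above is to sweep the two noise
## factors from a small driver loop rather than editing this file by hand.
## Sketch (assumes the body of this script has been wrapped in a hypothetical
## run_bundle_adjustment() function taking the two factors as arguments):
##
##     for cam_noise in [0.1, 0.5, 1.0, 2.0]:
##         run_bundle_adjustment(cam_pam_noise_factor=cam_noise,
##                               structure_noise_factor=0)
##
## Recording the final error norm reported for each setting shows roughly where
## the Levenberg-Marquardt iterations start getting trapped in local minima.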
optimizer = NonlinearLeastSquares.NonlinearLeastSquares(
max_iterations = 400,
delta_for_jacobian = 0.000001,
)
# This returns a camera whose optic axis is aligned with the world-Z axis and whose
# image plane is parallel to the world-XY plane. The parameters 'alpha_x' and 'alpha_y'
# are for the focal length in terms of the image sampling intervals along the x-axis
# and along the y-axis, respectively. The parameters 'x0' and 'y0' are for the
# coordinates of the point in the camera image plane where the optic axis penetrates
# the image plane with respect to the origin in the image plane (which is usually a
# corner of the image):
camera = ProjectiveCamera.ProjectiveCamera(
camera_type = 'projective',
alpha_x = 100.0,
alpha_y = 100.0,
x0 = 100.0,
y0 = 100.0,
)
camera.initialize()
camera.print_camera_matrix()
## To get around the problem of "nan" values for Rodrigues params when rotation is zero:
## The argument to the 'rotate' function is in degrees
camera.rotate_previously_initialized_camera_around_world_X_axis(0.5)
world_points = camera.make_world_points_random(15)
print(world_points)
tracked_point_indexes_for_display = None
if len(world_points) > 6:
tracked_point_indexes_for_display = sorted(random.sample(range(len(world_points)), 6))
camera.set_tracked_point_indexes_for_display(tracked_point_indexes_for_display)
print("\n\ntracked_point_indexes_for_display: %s" % str(tracked_point_indexes_for_display))
#camera.display_world_points_double_triangles(world_points)
camera.set_num_world_points(len(world_points))
## In the next statement, the first triple after 'world_points" is for the rotations
## in degrees around the three world axes and the second triple is for the translations
## along the three world axes. The large argument is to set the scale.
world_points_xformed = camera.apply_transformation_to_generic_world_points(world_points, (0,0,0), (0.0,0.0,50000.0), 1.0)
print("world_points_xformed: %s" % str(world_points_xformed))
## Let us now move the camera around and collect the pixels:
number_of_camera_positions = 0
camera_params_ground_truth = []
#y_motion_delta = 500.0
y_motion_delta = 1000.0
all_pixels = []
for i in range(5):
if i == 0:
# The 2nd arg is the y_motion_delta which we set to zero for i=0
camera.translate_a_previously_initialized_camera((0.0,0.0,0.0))
else:
camera.translate_a_previously_initialized_camera((0.0,y_motion_delta,0.0))
camera.add_new_camera_to_list_of_cameras()
camera_params_ground_truth.append(camera.get_current_camera_pose())
pixels = camera.get_pixels_for_a_sequence_of_world_points(world_points_xformed)
all_pixels.append(pixels)
number_of_camera_positions += 1
print("\n\nall pixels with Y motions of the camera: %s" % str(all_pixels))
#x_motion_delta = 500.0
x_motion_delta = 1000.0
for i in range(5):
camera.translate_a_previously_initialized_camera((x_motion_delta,0.0,0.0))
camera.add_new_camera_to_list_of_cameras()
camera_params_ground_truth.append(camera.get_current_camera_pose())
pixels = camera.get_pixels_for_a_sequence_of_world_points(world_points_xformed)
all_pixels.append(pixels)
number_of_camera_positions += 1
print("\n\nall pixels with X and Y motions of the camera: %s" % str(all_pixels))
motion_history = camera._get_camera_motion_history()
print("\n\ncamera motion history: %s" % str(motion_history))
all_cameras = camera.get_all_cameras()
print("\n\nDisplaying all cameras:")
for item in all_cameras.items():
print("\nFor camera %d" % item[0])
print(item[1])
print("\n\nall pixels: %s" % str(all_pixels))
print("\ntotal number of camera positions: %d" % number_of_camera_positions)
camera.construct_X_vector_for_bundle_adjustment(all_pixels)
params_arranged_list = camera.construct_parameter_vec_for_uncalibrated_cameras_using_rodrigues_rotations()
print("\nAll parameters (camera + structure) stringified for one camera position: %s" % str(params_arranged_list))
print("\nNumber of all parameters (camera + structure) for estimation: %d" % len(params_arranged_list))
structure_params = params_arranged_list[6*len(all_cameras):]
print("\nStructure params: %s" % str(structure_params))
## We will initialize the parameters by adding noise to the ground truth. By varying
## the amount of noise, we can study the power of the nonlinear-least-squares with
## regard to the uncertainty in how the parameters are initialized. But first we
## need the ground truth:
ground_truth_dict = camera.set_all_parameters_to_ground_truth_for_sanity_check(world_points_xformed, camera_params_ground_truth)
## Now construct the prediction vector:
camera.construct_Fvec_for_bundle_adjustment()
# Get the structure ground truth:
structure_ground_truth = camera.construct_structure_ground_truth()
print("\n\nStructure ground truth: %s" % str(structure_ground_truth))
## Now initialize the parameters:
initial_params_dict = {}
initial_params_list = [] # need this later for visualization
initial_structure_params_dict = {}
initial_structure_params_list = []
for param in params_arranged_list:
if param not in structure_params:
if param.startswith('w_'):
initial_params_dict[param] = ground_truth_dict[param] + cam_pam_noise_factor*random.uniform(-1.0,1.0)
else:
initial_params_dict[param] = ground_truth_dict[param] + 1000*cam_pam_noise_factor*random.uniform(-1.0,1.0)
else:
initial_params_dict[param] = ground_truth_dict[param] + structure_noise_factor*random.uniform(-1.0,1.0)
initial_structure_params_list.append(initial_params_dict[param])
initial_params_list.append(initial_params_dict[param])
camera.set_initial_values_for_structure([initial_structure_params_list[3*i:3*i+3] for i in range(len(initial_structure_params_list)//3)])
print("\n\nParameters and their initial values: %s" % str(initial_params_dict))
camera.set_params_list(params_arranged_list)
camera.set_initial_val_all_params_as_dict(initial_params_dict)
camera.set_initial_val_all_params(initial_params_list)
camera.set_constructor_options_for_optimizer_BA(optimizer)
camera.display_structure()
result = camera.get_scene_structure_from_camera_motion_with_bundle_adjustment()
######################### print out the calculated structure ########################
print("\n\n\nRESULTS RETURNED BY bundle_adjust_sfm_with_calibrated_cameras_translations_only.py")
num_iterations_used = result['number_of_iterations']
error_norms_with_iterations = result['error_norms_with_iterations']
final_param_values_list = result['parameter_values']
structure_param_values_list = final_param_values_list[-len(structure_ground_truth):]
print("\nError norms with iterations: %s" % str(error_norms_with_iterations))
print("\nNumber of iterations used: %d" % num_iterations_used)
print("\nFinal values for the parameters:\n")
for i in range(len(params_arranged_list)):
print("%s => %s [ground truth: %s] (initial value: %s) \n" % (params_arranged_list[i], final_param_values_list[i], ground_truth_dict[params_arranged_list[i]], initial_params_dict[params_arranged_list[i]])) | PypiClean |
/BlastRadius-0.1.23.tar.gz/BlastRadius-0.1.23/blastradius/server/static/js/svg-pan-zoom.js | (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(require,module,exports){
var svgPanZoom = require('./svg-pan-zoom.js');
// UMD module definition
(function(window, document){
// AMD
if (typeof define === 'function' && define.amd) {
define('svg-pan-zoom', function () {
return svgPanZoom;
});
// CMD
} else if (typeof module !== 'undefined' && module.exports) {
module.exports = svgPanZoom;
// Browser
// Keep exporting globally as module.exports is available because of browserify
window.svgPanZoom = svgPanZoom;
}
})(window, document)
},{"./svg-pan-zoom.js":4}],2:[function(require,module,exports){
var SvgUtils = require('./svg-utilities');
module.exports = {
enable: function(instance) {
// Select (and create if necessary) defs
var defs = instance.svg.querySelector('defs')
if (!defs) {
defs = document.createElementNS(SvgUtils.svgNS, 'defs')
instance.svg.appendChild(defs)
}
// Check for style element, and create it if it doesn't exist
var styleEl = defs.querySelector('style#svg-pan-zoom-controls-styles');
if (!styleEl) {
var style = document.createElementNS(SvgUtils.svgNS, 'style')
style.setAttribute('id', 'svg-pan-zoom-controls-styles')
style.setAttribute('type', 'text/css')
style.textContent = '.svg-pan-zoom-control { cursor: pointer; fill: black; fill-opacity: 0.333; } .svg-pan-zoom-control:hover { fill-opacity: 0.8; } .svg-pan-zoom-control-background { fill: white; fill-opacity: 0.5; } .svg-pan-zoom-control-background { fill-opacity: 0.8; }'
defs.appendChild(style)
}
// Zoom Group
var zoomGroup = document.createElementNS(SvgUtils.svgNS, 'g');
zoomGroup.setAttribute('id', 'svg-pan-zoom-controls');
zoomGroup.setAttribute('transform', 'translate(' + ( instance.width - 70 ) + ' ' + ( instance.height - 76 ) + ') scale(0.75)');
zoomGroup.setAttribute('class', 'svg-pan-zoom-control');
// Control elements
zoomGroup.appendChild(this._createZoomIn(instance))
zoomGroup.appendChild(this._createZoomReset(instance))
zoomGroup.appendChild(this._createZoomOut(instance))
// Finally append created element
instance.svg.appendChild(zoomGroup)
// Cache control instance
instance.controlIcons = zoomGroup
}
, _createZoomIn: function(instance) {
var zoomIn = document.createElementNS(SvgUtils.svgNS, 'g');
zoomIn.setAttribute('id', 'svg-pan-zoom-zoom-in');
zoomIn.setAttribute('transform', 'translate(30.5 5) scale(0.015)');
zoomIn.setAttribute('class', 'svg-pan-zoom-control');
zoomIn.addEventListener('click', function() {instance.getPublicInstance().zoomIn()}, false)
zoomIn.addEventListener('touchstart', function() {instance.getPublicInstance().zoomIn()}, false)
var zoomInBackground = document.createElementNS(SvgUtils.svgNS, 'rect'); // TODO change these background space fillers to rounded rectangles so they look prettier
zoomInBackground.setAttribute('x', '0');
zoomInBackground.setAttribute('y', '0');
zoomInBackground.setAttribute('width', '1500'); // larger than expected because the whole group is transformed to scale down
zoomInBackground.setAttribute('height', '1400');
zoomInBackground.setAttribute('class', 'svg-pan-zoom-control-background');
zoomIn.appendChild(zoomInBackground);
var zoomInShape = document.createElementNS(SvgUtils.svgNS, 'path');
zoomInShape.setAttribute('d', 'M1280 576v128q0 26 -19 45t-45 19h-320v320q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-320h-320q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h320v-320q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v320h320q26 0 45 19t19 45zM1536 1120v-960 q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z');
zoomInShape.setAttribute('class', 'svg-pan-zoom-control-element');
zoomIn.appendChild(zoomInShape);
return zoomIn
}
, _createZoomReset: function(instance){
// reset
var resetPanZoomControl = document.createElementNS(SvgUtils.svgNS, 'g');
resetPanZoomControl.setAttribute('id', 'svg-pan-zoom-reset-pan-zoom');
resetPanZoomControl.setAttribute('transform', 'translate(5 35) scale(0.4)');
resetPanZoomControl.setAttribute('class', 'svg-pan-zoom-control');
resetPanZoomControl.addEventListener('click', function() {instance.getPublicInstance().reset()}, false);
resetPanZoomControl.addEventListener('touchstart', function() {instance.getPublicInstance().reset()}, false);
var resetPanZoomControlBackground = document.createElementNS(SvgUtils.svgNS, 'rect'); // TODO change these background space fillers to rounded rectangles so they look prettier
resetPanZoomControlBackground.setAttribute('x', '2');
resetPanZoomControlBackground.setAttribute('y', '2');
resetPanZoomControlBackground.setAttribute('width', '182'); // larger than expected because the whole group is transformed to scale down
resetPanZoomControlBackground.setAttribute('height', '58');
resetPanZoomControlBackground.setAttribute('class', 'svg-pan-zoom-control-background');
resetPanZoomControl.appendChild(resetPanZoomControlBackground);
var resetPanZoomControlShape1 = document.createElementNS(SvgUtils.svgNS, 'path');
resetPanZoomControlShape1.setAttribute('d', 'M33.051,20.632c-0.742-0.406-1.854-0.609-3.338-0.609h-7.969v9.281h7.769c1.543,0,2.701-0.188,3.473-0.562c1.365-0.656,2.048-1.953,2.048-3.891C35.032,22.757,34.372,21.351,33.051,20.632z');
resetPanZoomControlShape1.setAttribute('class', 'svg-pan-zoom-control-element');
resetPanZoomControl.appendChild(resetPanZoomControlShape1);
var resetPanZoomControlShape2 = document.createElementNS(SvgUtils.svgNS, 'path');
resetPanZoomControlShape2.setAttribute('d', 'M170.231,0.5H15.847C7.102,0.5,0.5,5.708,0.5,11.84v38.861C0.5,56.833,7.102,61.5,15.847,61.5h154.384c8.745,0,15.269-4.667,15.269-10.798V11.84C185.5,5.708,178.976,0.5,170.231,0.5z M42.837,48.569h-7.969c-0.219-0.766-0.375-1.383-0.469-1.852c-0.188-0.969-0.289-1.961-0.305-2.977l-0.047-3.211c-0.03-2.203-0.41-3.672-1.142-4.406c-0.732-0.734-2.103-1.102-4.113-1.102h-7.05v13.547h-7.055V14.022h16.524c2.361,0.047,4.178,0.344,5.45,0.891c1.272,0.547,2.351,1.352,3.234,2.414c0.731,0.875,1.31,1.844,1.737,2.906s0.64,2.273,0.64,3.633c0,1.641-0.414,3.254-1.242,4.84s-2.195,2.707-4.102,3.363c1.594,0.641,2.723,1.551,3.387,2.73s0.996,2.98,0.996,5.402v2.32c0,1.578,0.063,2.648,0.19,3.211c0.19,0.891,0.635,1.547,1.333,1.969V48.569z M75.579,48.569h-26.18V14.022h25.336v6.117H56.454v7.336h16.781v6H56.454v8.883h19.125V48.569z M104.497,46.331c-2.44,2.086-5.887,3.129-10.34,3.129c-4.548,0-8.125-1.027-10.731-3.082s-3.909-4.879-3.909-8.473h6.891c0.224,1.578,0.662,2.758,1.316,3.539c1.196,1.422,3.246,2.133,6.15,2.133c1.739,0,3.151-0.188,4.236-0.562c2.058-0.719,3.087-2.055,3.087-4.008c0-1.141-0.504-2.023-1.512-2.648c-1.008-0.609-2.607-1.148-4.796-1.617l-3.74-0.82c-3.676-0.812-6.201-1.695-7.576-2.648c-2.328-1.594-3.492-4.086-3.492-7.477c0-3.094,1.139-5.664,3.417-7.711s5.623-3.07,10.036-3.07c3.685,0,6.829,0.965,9.431,2.895c2.602,1.93,3.966,4.73,4.093,8.402h-6.938c-0.128-2.078-1.057-3.555-2.787-4.43c-1.154-0.578-2.587-0.867-4.301-0.867c-1.907,0-3.428,0.375-4.565,1.125c-1.138,0.75-1.706,1.797-1.706,3.141c0,1.234,0.561,2.156,1.682,2.766c0.721,0.406,2.25,0.883,4.589,1.43l6.063,1.43c2.657,0.625,4.648,1.461,5.975,2.508c2.059,1.625,3.089,3.977,3.089,7.055C108.157,41.624,106.937,44.245,104.497,46.331z M139.61,48.569h-26.18V14.022h25.336v6.117h-18.281v7.336h16.781v6h-16.781v8.883h19.125V48.569z M170.337,20.14h-10.336v28.43h-7.266V20.14h-10.383v-6.117h27.984V20.14z');
resetPanZoomControlShape2.setAttribute('class', 'svg-pan-zoom-control-element');
resetPanZoomControl.appendChild(resetPanZoomControlShape2);
return resetPanZoomControl
}
, _createZoomOut: function(instance){
// zoom out
var zoomOut = document.createElementNS(SvgUtils.svgNS, 'g');
zoomOut.setAttribute('id', 'svg-pan-zoom-zoom-out');
zoomOut.setAttribute('transform', 'translate(30.5 70) scale(0.015)');
zoomOut.setAttribute('class', 'svg-pan-zoom-control');
zoomOut.addEventListener('click', function() {instance.getPublicInstance().zoomOut()}, false);
zoomOut.addEventListener('touchstart', function() {instance.getPublicInstance().zoomOut()}, false);
var zoomOutBackground = document.createElementNS(SvgUtils.svgNS, 'rect'); // TODO change these background space fillers to rounded rectangles so they look prettier
zoomOutBackground.setAttribute('x', '0');
zoomOutBackground.setAttribute('y', '0');
zoomOutBackground.setAttribute('width', '1500'); // larger than expected because the whole group is transformed to scale down
zoomOutBackground.setAttribute('height', '1400');
zoomOutBackground.setAttribute('class', 'svg-pan-zoom-control-background');
zoomOut.appendChild(zoomOutBackground);
var zoomOutShape = document.createElementNS(SvgUtils.svgNS, 'path');
zoomOutShape.setAttribute('d', 'M1280 576v128q0 26 -19 45t-45 19h-896q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h896q26 0 45 19t19 45zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5 t84.5 -203.5z');
zoomOutShape.setAttribute('class', 'svg-pan-zoom-control-element');
zoomOut.appendChild(zoomOutShape);
return zoomOut
}
, disable: function(instance) {
if (instance.controlIcons) {
instance.controlIcons.parentNode.removeChild(instance.controlIcons)
instance.controlIcons = null
}
}
}
},{"./svg-utilities":5}],3:[function(require,module,exports){
var SvgUtils = require('./svg-utilities')
, Utils = require('./utilities')
;
var ShadowViewport = function(viewport, options){
this.init(viewport, options)
}
/**
* Initialization
*
* @param {SVGElement} viewport
* @param {Object} options
*/
ShadowViewport.prototype.init = function(viewport, options) {
// DOM Elements
this.viewport = viewport
this.options = options
// State cache
this.originalState = {zoom: 1, x: 0, y: 0}
this.activeState = {zoom: 1, x: 0, y: 0}
this.updateCTMCached = Utils.proxy(this.updateCTM, this)
// Create a custom requestAnimationFrame taking in account refreshRate
this.requestAnimationFrame = Utils.createRequestAnimationFrame(this.options.refreshRate)
// ViewBox
this.viewBox = {x: 0, y: 0, width: 0, height: 0}
this.cacheViewBox()
// Process CTM
var newCTM = this.processCTM()
// Update viewport CTM and cache zoom and pan
this.setCTM(newCTM)
// Update CTM in this frame
this.updateCTM()
}
/**
* Cache initial viewBox value
* If no viewBox is defined, then use viewport size/position instead for viewBox values
*/
ShadowViewport.prototype.cacheViewBox = function() {
var svgViewBox = this.options.svg.getAttribute('viewBox')
if (svgViewBox) {
var viewBoxValues = svgViewBox.split(/[\s\,]/).filter(function(v){return v}).map(parseFloat)
// Cache viewbox x and y offset
this.viewBox.x = viewBoxValues[0]
this.viewBox.y = viewBoxValues[1]
this.viewBox.width = viewBoxValues[2]
this.viewBox.height = viewBoxValues[3]
var zoom = Math.min(this.options.width / this.viewBox.width, this.options.height / this.viewBox.height)
// Update active state
this.activeState.zoom = zoom
this.activeState.x = (this.options.width - this.viewBox.width * zoom) / 2
this.activeState.y = (this.options.height - this.viewBox.height * zoom) / 2
// Force updating CTM
this.updateCTMOnNextFrame()
this.options.svg.removeAttribute('viewBox')
} else {
this.simpleViewBoxCache()
}
}
/**
* Recalculate viewport sizes and update viewBox cache
*/
ShadowViewport.prototype.simpleViewBoxCache = function() {
var bBox = this.viewport.getBBox()
this.viewBox.x = bBox.x
this.viewBox.y = bBox.y
this.viewBox.width = bBox.width
this.viewBox.height = bBox.height
}
/**
* Returns a viewbox object. Safe to alter
*
* @return {Object} viewbox object
*/
ShadowViewport.prototype.getViewBox = function() {
return Utils.extend({}, this.viewBox)
}
/**
* Get initial zoom and pan values. Save them into originalState
* Parses viewBox attribute to alter initial sizes
*
* @return {CTM} CTM object based on options
*/
ShadowViewport.prototype.processCTM = function() {
var newCTM = this.getCTM()
if (this.options.fit || this.options.contain) {
var newScale;
if (this.options.fit) {
newScale = Math.min(this.options.width/this.viewBox.width, this.options.height/this.viewBox.height);
} else {
newScale = Math.max(this.options.width/this.viewBox.width, this.options.height/this.viewBox.height);
}
newCTM.a = newScale; //x-scale
newCTM.d = newScale; //y-scale
newCTM.e = -this.viewBox.x * newScale; //x-transform
newCTM.f = -this.viewBox.y * newScale; //y-transform
}
if (this.options.center) {
var offsetX = (this.options.width - (this.viewBox.width + this.viewBox.x * 2) * newCTM.a) * 0.5
, offsetY = (this.options.height - (this.viewBox.height + this.viewBox.y * 2) * newCTM.a) * 0.5
newCTM.e = offsetX
newCTM.f = offsetY
}
  // Cache initial values. Based on activeState and fit+center options
this.originalState.zoom = newCTM.a
this.originalState.x = newCTM.e
this.originalState.y = newCTM.f
return newCTM
}
/**
* Return originalState object. Safe to alter
*
* @return {Object}
*/
ShadowViewport.prototype.getOriginalState = function() {
return Utils.extend({}, this.originalState)
}
/**
* Return actualState object. Safe to alter
*
* @return {Object}
*/
ShadowViewport.prototype.getState = function() {
return Utils.extend({}, this.activeState)
}
/**
* Get zoom scale
*
* @return {Float} zoom scale
*/
ShadowViewport.prototype.getZoom = function() {
return this.activeState.zoom
}
/**
 * Get zoom scale for public usage
*
* @return {Float} zoom scale
*/
ShadowViewport.prototype.getRelativeZoom = function() {
return this.activeState.zoom / this.originalState.zoom
}
/**
 * Compute zoom scale for public usage
*
* @return {Float} zoom scale
*/
ShadowViewport.prototype.computeRelativeZoom = function(scale) {
return scale / this.originalState.zoom
}
/**
* Get pan
*
* @return {Object}
*/
ShadowViewport.prototype.getPan = function() {
return {x: this.activeState.x, y: this.activeState.y}
}
/**
* Return cached viewport CTM value that can be safely modified
*
* @return {SVGMatrix}
*/
ShadowViewport.prototype.getCTM = function() {
var safeCTM = this.options.svg.createSVGMatrix()
  // Copy values manually as in FF they are not iterable
safeCTM.a = this.activeState.zoom
safeCTM.b = 0
safeCTM.c = 0
safeCTM.d = this.activeState.zoom
safeCTM.e = this.activeState.x
safeCTM.f = this.activeState.y
return safeCTM
}
/**
* Set a new CTM
*
* @param {SVGMatrix} newCTM
*/
ShadowViewport.prototype.setCTM = function(newCTM) {
var willZoom = this.isZoomDifferent(newCTM)
, willPan = this.isPanDifferent(newCTM)
if (willZoom || willPan) {
// Before zoom
if (willZoom) {
// If returns false then cancel zooming
if (this.options.beforeZoom(this.getRelativeZoom(), this.computeRelativeZoom(newCTM.a)) === false) {
newCTM.a = newCTM.d = this.activeState.zoom
willZoom = false
} else {
this.updateCache(newCTM);
this.options.onZoom(this.getRelativeZoom())
}
}
// Before pan
if (willPan) {
var preventPan = this.options.beforePan(this.getPan(), {x: newCTM.e, y: newCTM.f})
// If prevent pan is an object
, preventPanX = false
, preventPanY = false
// If prevent pan is Boolean false
if (preventPan === false) {
// Set x and y same as before
newCTM.e = this.getPan().x
newCTM.f = this.getPan().y
preventPanX = preventPanY = true
} else if (Utils.isObject(preventPan)) {
// Check for X axes attribute
if (preventPan.x === false) {
// Prevent panning on x axes
newCTM.e = this.getPan().x
preventPanX = true
} else if (Utils.isNumber(preventPan.x)) {
// Set a custom pan value
newCTM.e = preventPan.x
}
// Check for Y axes attribute
if (preventPan.y === false) {
// Prevent panning on x axes
newCTM.f = this.getPan().y
preventPanY = true
} else if (Utils.isNumber(preventPan.y)) {
// Set a custom pan value
newCTM.f = preventPan.y
}
}
// Update willPan flag
// Check if newCTM is still different
if ((preventPanX && preventPanY) || !this.isPanDifferent(newCTM)) {
willPan = false
} else {
this.updateCache(newCTM);
this.options.onPan(this.getPan());
}
}
// Check again if should zoom or pan
if (willZoom || willPan) {
this.updateCTMOnNextFrame()
}
}
}
ShadowViewport.prototype.isZoomDifferent = function(newCTM) {
return this.activeState.zoom !== newCTM.a
}
ShadowViewport.prototype.isPanDifferent = function(newCTM) {
return this.activeState.x !== newCTM.e || this.activeState.y !== newCTM.f
}
/**
* Update cached CTM and active state
*
* @param {SVGMatrix} newCTM
*/
ShadowViewport.prototype.updateCache = function(newCTM) {
this.activeState.zoom = newCTM.a
this.activeState.x = newCTM.e
this.activeState.y = newCTM.f
}
ShadowViewport.prototype.pendingUpdate = false
/**
* Place a request to update CTM on next Frame
*/
ShadowViewport.prototype.updateCTMOnNextFrame = function() {
if (!this.pendingUpdate) {
// Lock
this.pendingUpdate = true
// Throttle next update
this.requestAnimationFrame.call(window, this.updateCTMCached)
}
}
/**
* Update viewport CTM with cached CTM
*/
ShadowViewport.prototype.updateCTM = function() {
var ctm = this.getCTM()
// Updates SVG element
SvgUtils.setCTM(this.viewport, ctm, this.defs)
// Free the lock
this.pendingUpdate = false
// Notify about the update
if(this.options.onUpdatedCTM) {
this.options.onUpdatedCTM(ctm)
}
}
module.exports = function(viewport, options){
return new ShadowViewport(viewport, options)
}
},{"./svg-utilities":5,"./utilities":7}],4:[function(require,module,exports){
var Wheel = require('./uniwheel')
, ControlIcons = require('./control-icons')
, Utils = require('./utilities')
, SvgUtils = require('./svg-utilities')
, ShadowViewport = require('./shadow-viewport')
var SvgPanZoom = function(svg, options) {
this.init(svg, options)
}
var optionsDefaults = {
viewportSelector: '.svg-pan-zoom_viewport' // Viewport selector. Can be querySelector string or SVGElement
, panEnabled: true // enable or disable panning (default enabled)
, controlIconsEnabled: false // insert icons to give user an option in addition to mouse events to control pan/zoom (default disabled)
, zoomEnabled: true // enable or disable zooming (default enabled)
, dblClickZoomEnabled: true // enable or disable zooming by double clicking (default enabled)
, mouseWheelZoomEnabled: true // enable or disable zooming by mouse wheel (default enabled)
, preventMouseEventsDefault: true // enable or disable preventDefault for mouse events
, zoomScaleSensitivity: 0.1 // Zoom sensitivity
, minZoom: 0.5 // Minimum Zoom level
, maxZoom: 10 // Maximum Zoom level
, fit: true // enable or disable viewport fit in SVG (default true)
, contain: false // enable or disable viewport contain the svg (default false)
, center: true // enable or disable viewport centering in SVG (default true)
, refreshRate: 'auto' // Maximum number of frames per second (altering SVG's viewport)
, beforeZoom: null
, onZoom: null
, beforePan: null
, onPan: null
, customEventsHandler: null
, eventsListenerElement: null
, onUpdatedCTM: null
}
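// Initialization sketch (assumes the public factory exported by this package;
// option names match the defaults above, and the element passed in is a
// placeholder for your own SVG):
//
//   var panZoom = svgPanZoom(document.querySelector('svg'), {
//     zoomEnabled: true,
//     controlIconsEnabled: true,
//     fit: true,
//     center: true
//   });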
SvgPanZoom.prototype.init = function(svg, options) {
var that = this
this.svg = svg
this.defs = svg.querySelector('defs')
// Add default attributes to SVG
SvgUtils.setupSvgAttributes(this.svg)
// Set options
this.options = Utils.extend(Utils.extend({}, optionsDefaults), options)
// Set default state
this.state = 'none'
// Get dimensions
var boundingClientRectNormalized = SvgUtils.getBoundingClientRectNormalized(svg)
this.width = boundingClientRectNormalized.width
this.height = boundingClientRectNormalized.height
// Init shadow viewport
this.viewport = ShadowViewport(SvgUtils.getOrCreateViewport(this.svg, this.options.viewportSelector), {
svg: this.svg
, width: this.width
, height: this.height
, fit: this.options.fit
, contain: this.options.contain
, center: this.options.center
, refreshRate: this.options.refreshRate
// Put callbacks into functions as they can change through time
, beforeZoom: function(oldScale, newScale) {
if (that.viewport && that.options.beforeZoom) {return that.options.beforeZoom(oldScale, newScale)}
}
, onZoom: function(scale) {
if (that.viewport && that.options.onZoom) {return that.options.onZoom(scale)}
}
, beforePan: function(oldPoint, newPoint) {
if (that.viewport && that.options.beforePan) {return that.options.beforePan(oldPoint, newPoint)}
}
, onPan: function(point) {
if (that.viewport && that.options.onPan) {return that.options.onPan(point)}
}
, onUpdatedCTM: function(ctm) {
if (that.viewport && that.options.onUpdatedCTM) {return that.options.onUpdatedCTM(ctm)}
}
})
// Wrap callbacks into public API context
var publicInstance = this.getPublicInstance()
publicInstance.setBeforeZoom(this.options.beforeZoom)
publicInstance.setOnZoom(this.options.onZoom)
publicInstance.setBeforePan(this.options.beforePan)
publicInstance.setOnPan(this.options.onPan)
publicInstance.setOnUpdatedCTM(this.options.onUpdatedCTM)
if (this.options.controlIconsEnabled) {
ControlIcons.enable(this)
}
// Init events handlers
this.lastMouseWheelEventTime = Date.now()
this.setupHandlers()
}
/**
* Register event handlers
*/
SvgPanZoom.prototype.setupHandlers = function() {
var that = this
, prevEvt = null // use for touchstart event to detect double tap
;
this.eventListeners = {
// Mouse down group
mousedown: function(evt) {
var result = that.handleMouseDown(evt, prevEvt);
prevEvt = evt
return result;
}
, touchstart: function(evt) {
var result = that.handleMouseDown(evt, prevEvt);
prevEvt = evt
return result;
}
// Mouse up group
, mouseup: function(evt) {
return that.handleMouseUp(evt);
}
, touchend: function(evt) {
return that.handleMouseUp(evt);
}
// Mouse move group
, mousemove: function(evt) {
return that.handleMouseMove(evt);
}
, touchmove: function(evt) {
return that.handleMouseMove(evt);
}
// Mouse leave group
, mouseleave: function(evt) {
return that.handleMouseUp(evt);
}
, touchleave: function(evt) {
return that.handleMouseUp(evt);
}
, touchcancel: function(evt) {
return that.handleMouseUp(evt);
}
}
// Init custom events handler if available
if (this.options.customEventsHandler != null) { // jshint ignore:line
this.options.customEventsHandler.init({
svgElement: this.svg
, eventsListenerElement: this.options.eventsListenerElement
, instance: this.getPublicInstance()
})
// Custom event handler may halt builtin listeners
var haltEventListeners = this.options.customEventsHandler.haltEventListeners
if (haltEventListeners && haltEventListeners.length) {
for (var i = haltEventListeners.length - 1; i >= 0; i--) {
if (this.eventListeners.hasOwnProperty(haltEventListeners[i])) {
delete this.eventListeners[haltEventListeners[i]]
}
}
}
}
// Bind eventListeners
for (var event in this.eventListeners) {
// Attach event to eventsListenerElement or SVG if not available
(this.options.eventsListenerElement || this.svg)
.addEventListener(event, this.eventListeners[event], false)
}
// Zoom using mouse wheel
if (this.options.mouseWheelZoomEnabled) {
this.options.mouseWheelZoomEnabled = false // set to false as enable will set it back to true
this.enableMouseWheelZoom()
}
}
/**
* Enable ability to zoom using mouse wheel
*/
SvgPanZoom.prototype.enableMouseWheelZoom = function() {
if (!this.options.mouseWheelZoomEnabled) {
var that = this
// Mouse wheel listener
this.wheelListener = function(evt) {
return that.handleMouseWheel(evt);
}
// Bind wheelListener
Wheel.on(this.options.eventsListenerElement || this.svg, this.wheelListener, false)
this.options.mouseWheelZoomEnabled = true
}
}
/**
* Disable ability to zoom using mouse wheel
*/
SvgPanZoom.prototype.disableMouseWheelZoom = function() {
if (this.options.mouseWheelZoomEnabled) {
Wheel.off(this.options.eventsListenerElement || this.svg, this.wheelListener, false)
this.options.mouseWheelZoomEnabled = false
}
}
/**
* Handle mouse wheel event
*
* @param {Event} evt
*/
SvgPanZoom.prototype.handleMouseWheel = function(evt) {
if (!this.options.zoomEnabled || this.state !== 'none') {
return;
}
if (this.options.preventMouseEventsDefault){
if (evt.preventDefault) {
evt.preventDefault();
} else {
evt.returnValue = false;
}
}
// Default delta in case that deltaY is not available
var delta = evt.deltaY || 1
, timeDelta = Date.now() - this.lastMouseWheelEventTime
, divider = 3 + Math.max(0, 30 - timeDelta)
// Update cache
this.lastMouseWheelEventTime = Date.now()
// Make empirical adjustments for browsers that give deltaY in pixels (deltaMode=0)
if ('deltaMode' in evt && evt.deltaMode === 0 && evt.wheelDelta) {
delta = evt.deltaY === 0 ? 0 : Math.abs(evt.wheelDelta) / evt.deltaY
}
delta = -0.3 < delta && delta < 0.3 ? delta : (delta > 0 ? 1 : -1) * Math.log(Math.abs(delta) + 10) / divider
var inversedScreenCTM = this.svg.getScreenCTM().inverse()
, relativeMousePoint = SvgUtils.getEventPoint(evt, this.svg).matrixTransform(inversedScreenCTM)
, zoom = Math.pow(1 + this.options.zoomScaleSensitivity, (-1) * delta); // multiplying by neg. 1 so as to make zoom in/out behavior match Google maps behavior
this.zoomAtPoint(zoom, relativeMousePoint)
}
/**
* Zoom in at a SVG point
*
* @param {SVGPoint} point
* @param {Float} zoomScale Number representing how much to zoom
* @param {Boolean} zoomAbsolute Default false. If true, zoomScale is treated as an absolute value.
* Otherwise, zoomScale is treated as a multiplied (e.g. 1.10 would zoom in 10%)
*/
SvgPanZoom.prototype.zoomAtPoint = function(zoomScale, point, zoomAbsolute) {
var originalState = this.viewport.getOriginalState()
if (!zoomAbsolute) {
// Fit zoomScale in set bounds
if (this.getZoom() * zoomScale < this.options.minZoom * originalState.zoom) {
zoomScale = (this.options.minZoom * originalState.zoom) / this.getZoom()
} else if (this.getZoom() * zoomScale > this.options.maxZoom * originalState.zoom) {
zoomScale = (this.options.maxZoom * originalState.zoom) / this.getZoom()
}
} else {
// Fit zoomScale in set bounds
zoomScale = Math.max(this.options.minZoom * originalState.zoom, Math.min(this.options.maxZoom * originalState.zoom, zoomScale))
// Find relative scale to achieve desired scale
zoomScale = zoomScale/this.getZoom()
}
var oldCTM = this.viewport.getCTM()
, relativePoint = point.matrixTransform(oldCTM.inverse())
, modifier = this.svg.createSVGMatrix().translate(relativePoint.x, relativePoint.y).scale(zoomScale).translate(-relativePoint.x, -relativePoint.y)
, newCTM = oldCTM.multiply(modifier)
if (newCTM.a !== oldCTM.a) {
this.viewport.setCTM(newCTM)
}
}
/**
* Zoom at center point
*
* @param {Float} scale
* @param {Boolean} absolute Marks zoom scale as relative or absolute
*/
SvgPanZoom.prototype.zoom = function(scale, absolute) {
this.zoomAtPoint(scale, SvgUtils.getSvgCenterPoint(this.svg, this.width, this.height), absolute)
}
/**
* Zoom used by public instance
*
* @param {Float} scale
* @param {Boolean} absolute Marks zoom scale as relative or absolute
*/
SvgPanZoom.prototype.publicZoom = function(scale, absolute) {
if (absolute) {
scale = this.computeFromRelativeZoom(scale)
}
this.zoom(scale, absolute)
}
/**
* Zoom at point used by public instance
*
* @param {Float} scale
* @param {SVGPoint|Object} point An object that has x and y attributes
* @param {Boolean} absolute Marks zoom scale as relative or absolute
*/
SvgPanZoom.prototype.publicZoomAtPoint = function(scale, point, absolute) {
if (absolute) {
// Transform zoom into a relative value
scale = this.computeFromRelativeZoom(scale)
}
// If not a SVGPoint but has x and y then create a SVGPoint
if (Utils.getType(point) !== 'SVGPoint') {
if('x' in point && 'y' in point) {
point = SvgUtils.createSVGPoint(this.svg, point.x, point.y)
} else {
throw new Error('Given point is invalid')
}
}
this.zoomAtPoint(scale, point, absolute)
}
/**
* Get zoom scale
*
* @return {Float} zoom scale
*/
SvgPanZoom.prototype.getZoom = function() {
return this.viewport.getZoom()
}
/**
* Get zoom scale for public usage
*
* @return {Float} zoom scale
*/
SvgPanZoom.prototype.getRelativeZoom = function() {
return this.viewport.getRelativeZoom()
}
/**
* Compute actual zoom from public zoom
*
* @param {Float} zoom
* @return {Float} zoom scale
*/
SvgPanZoom.prototype.computeFromRelativeZoom = function(zoom) {
return zoom * this.viewport.getOriginalState().zoom
}
/**
* Set zoom to initial state
*/
SvgPanZoom.prototype.resetZoom = function() {
var originalState = this.viewport.getOriginalState()
this.zoom(originalState.zoom, true);
}
/**
* Set pan to initial state
*/
SvgPanZoom.prototype.resetPan = function() {
this.pan(this.viewport.getOriginalState());
}
/**
* Set pan and zoom to initial state
*/
SvgPanZoom.prototype.reset = function() {
this.resetZoom()
this.resetPan()
}
/**
* Handle double click event
* See handleMouseDown() for alternate detection method
*
* @param {Event} evt
*/
SvgPanZoom.prototype.handleDblClick = function(evt) {
if (this.options.preventMouseEventsDefault) {
if (evt.preventDefault) {
evt.preventDefault()
} else {
evt.returnValue = false
}
}
// Check if target was a control button
if (this.options.controlIconsEnabled) {
var targetClass = evt.target.getAttribute('class') || ''
if (targetClass.indexOf('svg-pan-zoom-control') > -1) {
return false
}
}
var zoomFactor
if (evt.shiftKey) {
zoomFactor = 1/((1 + this.options.zoomScaleSensitivity) * 2) // zoom out when shift key pressed
} else {
zoomFactor = (1 + this.options.zoomScaleSensitivity) * 2
}
var point = SvgUtils.getEventPoint(evt, this.svg).matrixTransform(this.svg.getScreenCTM().inverse())
this.zoomAtPoint(zoomFactor, point)
}
/**
* Handle click event
*
* @param {Event} evt
*/
SvgPanZoom.prototype.handleMouseDown = function(evt, prevEvt) {
if (this.options.preventMouseEventsDefault) {
if (evt.preventDefault) {
evt.preventDefault()
} else {
evt.returnValue = false
}
}
Utils.mouseAndTouchNormalize(evt, this.svg)
// Double click detection; more consistent than ondblclick
if (this.options.dblClickZoomEnabled && Utils.isDblClick(evt, prevEvt)){
this.handleDblClick(evt)
} else {
// Pan mode
this.state = 'pan'
this.firstEventCTM = this.viewport.getCTM()
this.stateOrigin = SvgUtils.getEventPoint(evt, this.svg).matrixTransform(this.firstEventCTM.inverse())
}
}
/**
* Handle mouse move event
*
* @param {Event} evt
*/
SvgPanZoom.prototype.handleMouseMove = function(evt) {
if (this.options.preventMouseEventsDefault) {
if (evt.preventDefault) {
evt.preventDefault()
} else {
evt.returnValue = false
}
}
if (this.state === 'pan' && this.options.panEnabled) {
// Pan mode
var point = SvgUtils.getEventPoint(evt, this.svg).matrixTransform(this.firstEventCTM.inverse())
, viewportCTM = this.firstEventCTM.translate(point.x - this.stateOrigin.x, point.y - this.stateOrigin.y)
this.viewport.setCTM(viewportCTM)
}
}
/**
* Handle mouse button release event
*
* @param {Event} evt
*/
SvgPanZoom.prototype.handleMouseUp = function(evt) {
if (this.options.preventMouseEventsDefault) {
if (evt.preventDefault) {
evt.preventDefault()
} else {
evt.returnValue = false
}
}
if (this.state === 'pan') {
// Quit pan mode
this.state = 'none'
}
}
/**
* Adjust viewport size (only) so it will fit in SVG
* Does not center image
*/
SvgPanZoom.prototype.fit = function() {
var viewBox = this.viewport.getViewBox()
, newScale = Math.min(this.width/viewBox.width, this.height/viewBox.height)
this.zoom(newScale, true)
}
/**
* Adjust viewport size (only) so it will contain the SVG
* Does not center image
*/
SvgPanZoom.prototype.contain = function() {
var viewBox = this.viewport.getViewBox()
, newScale = Math.max(this.width/viewBox.width, this.height/viewBox.height)
this.zoom(newScale, true)
}
/**
* Adjust viewport pan (only) so it will be centered in SVG
* Does not zoom/fit/contain image
*/
SvgPanZoom.prototype.center = function() {
var viewBox = this.viewport.getViewBox()
, offsetX = (this.width - (viewBox.width + viewBox.x * 2) * this.getZoom()) * 0.5
, offsetY = (this.height - (viewBox.height + viewBox.y * 2) * this.getZoom()) * 0.5
this.getPublicInstance().pan({x: offsetX, y: offsetY})
}
/**
* Update content cached BorderBox
* Use when viewport contents change
*/
SvgPanZoom.prototype.updateBBox = function() {
this.viewport.simpleViewBoxCache()
}
/**
* Pan to a rendered position
*
* @param {Object} point {x: 0, y: 0}
*/
SvgPanZoom.prototype.pan = function(point) {
var viewportCTM = this.viewport.getCTM()
viewportCTM.e = point.x
viewportCTM.f = point.y
this.viewport.setCTM(viewportCTM)
}
/**
* Relatively pan the graph by a specified rendered position vector
*
* @param {Object} point {x: 0, y: 0}
*/
SvgPanZoom.prototype.panBy = function(point) {
var viewportCTM = this.viewport.getCTM()
viewportCTM.e += point.x
viewportCTM.f += point.y
this.viewport.setCTM(viewportCTM)
}
/**
* Get pan vector
*
* @return {Object} {x: 0, y: 0}
*/
SvgPanZoom.prototype.getPan = function() {
var state = this.viewport.getState()
return {x: state.x, y: state.y}
}
/**
* Recalculates cached svg dimensions and controls position
*/
SvgPanZoom.prototype.resize = function() {
// Get dimensions
var boundingClientRectNormalized = SvgUtils.getBoundingClientRectNormalized(this.svg)
this.width = boundingClientRectNormalized.width
this.height = boundingClientRectNormalized.height
// Recalculate original state
var viewport = this.viewport
viewport.options.width = this.width
viewport.options.height = this.height
viewport.processCTM()
// Reposition control icons by re-enabling them
if (this.options.controlIconsEnabled) {
this.getPublicInstance().disableControlIcons()
this.getPublicInstance().enableControlIcons()
}
}
/**
* Unbind mouse events, free callbacks and destroy public instance
*/
SvgPanZoom.prototype.destroy = function() {
var that = this
// Free callbacks
this.beforeZoom = null
this.onZoom = null
this.beforePan = null
this.onPan = null
this.onUpdatedCTM = null
// Destroy custom event handlers
if (this.options.customEventsHandler != null) { // jshint ignore:line
this.options.customEventsHandler.destroy({
svgElement: this.svg
, eventsListenerElement: this.options.eventsListenerElement
, instance: this.getPublicInstance()
})
}
// Unbind eventListeners
for (var event in this.eventListeners) {
(this.options.eventsListenerElement || this.svg)
.removeEventListener(event, this.eventListeners[event], false)
}
// Unbind wheelListener
this.disableMouseWheelZoom()
// Remove control icons
this.getPublicInstance().disableControlIcons()
// Reset zoom and pan
this.reset()
// Remove instance from instancesStore
instancesStore = instancesStore.filter(function(instance){
return instance.svg !== that.svg
})
// Delete options and its contents
delete this.options
// Delete viewport to make public shadow viewport functions uncallable
delete this.viewport
// Destroy public instance and rewrite getPublicInstance
delete this.publicInstance
delete this.pi
this.getPublicInstance = function(){
return null
}
}
/**
* Returns a public instance object
*
* @return {Object} Public instance object
*/
SvgPanZoom.prototype.getPublicInstance = function() {
var that = this
// Create cache
if (!this.publicInstance) {
this.publicInstance = this.pi = {
// Pan
enablePan: function() {that.options.panEnabled = true; return that.pi}
, disablePan: function() {that.options.panEnabled = false; return that.pi}
, isPanEnabled: function() {return !!that.options.panEnabled}
, pan: function(point) {that.pan(point); return that.pi}
, panBy: function(point) {that.panBy(point); return that.pi}
, getPan: function() {return that.getPan()}
// Pan event
, setBeforePan: function(fn) {that.options.beforePan = fn === null ? null : Utils.proxy(fn, that.publicInstance); return that.pi}
, setOnPan: function(fn) {that.options.onPan = fn === null ? null : Utils.proxy(fn, that.publicInstance); return that.pi}
// Zoom and Control Icons
, enableZoom: function() {that.options.zoomEnabled = true; return that.pi}
, disableZoom: function() {that.options.zoomEnabled = false; return that.pi}
, isZoomEnabled: function() {return !!that.options.zoomEnabled}
, enableControlIcons: function() {
if (!that.options.controlIconsEnabled) {
that.options.controlIconsEnabled = true
ControlIcons.enable(that)
}
return that.pi
}
, disableControlIcons: function() {
if (that.options.controlIconsEnabled) {
that.options.controlIconsEnabled = false;
ControlIcons.disable(that)
}
return that.pi
}
, isControlIconsEnabled: function() {return !!that.options.controlIconsEnabled}
// Double click zoom
, enableDblClickZoom: function() {that.options.dblClickZoomEnabled = true; return that.pi}
, disableDblClickZoom: function() {that.options.dblClickZoomEnabled = false; return that.pi}
, isDblClickZoomEnabled: function() {return !!that.options.dblClickZoomEnabled}
// Mouse wheel zoom
, enableMouseWheelZoom: function() {that.enableMouseWheelZoom(); return that.pi}
, disableMouseWheelZoom: function() {that.disableMouseWheelZoom(); return that.pi}
, isMouseWheelZoomEnabled: function() {return !!that.options.mouseWheelZoomEnabled}
// Zoom scale and bounds
, setZoomScaleSensitivity: function(scale) {that.options.zoomScaleSensitivity = scale; return that.pi}
, setMinZoom: function(zoom) {that.options.minZoom = zoom; return that.pi}
, setMaxZoom: function(zoom) {that.options.maxZoom = zoom; return that.pi}
// Zoom event
, setBeforeZoom: function(fn) {that.options.beforeZoom = fn === null ? null : Utils.proxy(fn, that.publicInstance); return that.pi}
, setOnZoom: function(fn) {that.options.onZoom = fn === null ? null : Utils.proxy(fn, that.publicInstance); return that.pi}
// Zooming
, zoom: function(scale) {that.publicZoom(scale, true); return that.pi}
, zoomBy: function(scale) {that.publicZoom(scale, false); return that.pi}
, zoomAtPoint: function(scale, point) {that.publicZoomAtPoint(scale, point, true); return that.pi}
, zoomAtPointBy: function(scale, point) {that.publicZoomAtPoint(scale, point, false); return that.pi}
, zoomIn: function() {this.zoomBy(1 + that.options.zoomScaleSensitivity); return that.pi}
, zoomOut: function() {this.zoomBy(1 / (1 + that.options.zoomScaleSensitivity)); return that.pi}
, getZoom: function() {return that.getRelativeZoom()}
// CTM update
, setOnUpdatedCTM: function(fn) {that.options.onUpdatedCTM = fn === null ? null : Utils.proxy(fn, that.publicInstance); return that.pi}
// Reset
, resetZoom: function() {that.resetZoom(); return that.pi}
, resetPan: function() {that.resetPan(); return that.pi}
, reset: function() {that.reset(); return that.pi}
// Fit, Contain and Center
, fit: function() {that.fit(); return that.pi}
, contain: function() {that.contain(); return that.pi}
, center: function() {that.center(); return that.pi}
// Size and Resize
, updateBBox: function() {that.updateBBox(); return that.pi}
, resize: function() {that.resize(); return that.pi}
, getSizes: function() {
return {
width: that.width
, height: that.height
, realZoom: that.getZoom()
, viewBox: that.viewport.getViewBox()
}
}
// Destroy
, destroy: function() {that.destroy(); return that.pi}
}
}
return this.publicInstance
}
/**
* Stores pairs of instances of SvgPanZoom and SVG
* Each pair is represented by an object {svg: SVGSVGElement, instance: SvgPanZoom}
*
* @type {Array}
*/
var instancesStore = []
var svgPanZoom = function(elementOrSelector, options){
var svg = Utils.getSvg(elementOrSelector)
if (svg === null) {
return null
} else {
// Look for existent instance
for(var i = instancesStore.length - 1; i >= 0; i--) {
if (instancesStore[i].svg === svg) {
return instancesStore[i].instance.getPublicInstance()
}
}
// If instance not found - create one
instancesStore.push({
svg: svg
, instance: new SvgPanZoom(svg, options)
})
// Return just pushed instance
return instancesStore[instancesStore.length - 1].instance.getPublicInstance()
}
}
module.exports = svgPanZoom;
},{"./control-icons":2,"./shadow-viewport":3,"./svg-utilities":5,"./uniwheel":6,"./utilities":7}],5:[function(require,module,exports){
var Utils = require('./utilities')
, _browser = 'unknown'
;
// http://stackoverflow.com/questions/9847580/how-to-detect-safari-chrome-ie-firefox-and-opera-browser
if (/*@cc_on!@*/false || !!document.documentMode) { // internet explorer
_browser = 'ie';
}
module.exports = {
svgNS: 'http://www.w3.org/2000/svg'
, xmlNS: 'http://www.w3.org/XML/1998/namespace'
, xmlnsNS: 'http://www.w3.org/2000/xmlns/'
, xlinkNS: 'http://www.w3.org/1999/xlink'
, evNS: 'http://www.w3.org/2001/xml-events'
/**
* Get svg dimensions: width and height
*
* @param {SVGSVGElement} svg
* @return {Object} {width: 0, height: 0}
*/
, getBoundingClientRectNormalized: function(svg) {
if (svg.clientWidth && svg.clientHeight) {
return {width: svg.clientWidth, height: svg.clientHeight}
} else if (!!svg.getBoundingClientRect()) {
return svg.getBoundingClientRect();
} else {
throw new Error('Cannot get BoundingClientRect for SVG.');
}
}
/**
* Gets g element with class of "viewport" or creates it if it doesn't exist
*
* @param {SVGSVGElement} svg
* @return {SVGElement} g (group) element
*/
, getOrCreateViewport: function(svg, selector) {
var viewport = null
if (Utils.isElement(selector)) {
viewport = selector
} else {
viewport = svg.querySelector(selector)
}
// Check if there is just one main group in SVG
if (!viewport) {
var childNodes = Array.prototype.slice.call(svg.childNodes || svg.children).filter(function(el){
return el.nodeName !== 'defs' && el.nodeName !== '#text'
})
// Node name should be SVGGElement and should have no transform attribute
// Groups with transform are not used as viewport because it involves parsing of all transform possibilities
if (childNodes.length === 1 && childNodes[0].nodeName === 'g' && childNodes[0].getAttribute('transform') === null) {
viewport = childNodes[0]
}
}
// If no favorable group element exists then create one
if (!viewport) {
var viewportId = 'viewport-' + new Date().toISOString().replace(/\D/g, '');
viewport = document.createElementNS(this.svgNS, 'g');
viewport.setAttribute('id', viewportId);
// Internet Explorer (all versions?) can't use childNodes, but other browsers prefer (require?) using childNodes
var svgChildren = svg.childNodes || svg.children;
if (!!svgChildren && svgChildren.length > 0) {
for (var i = svgChildren.length; i > 0; i--) {
// Move everything into viewport except defs
if (svgChildren[svgChildren.length - i].nodeName !== 'defs') {
viewport.appendChild(svgChildren[svgChildren.length - i]);
}
}
}
svg.appendChild(viewport);
}
// Parse class names
var classNames = [];
if (viewport.getAttribute('class')) {
classNames = viewport.getAttribute('class').split(' ')
}
// Set class (if not set already)
if (!~classNames.indexOf('svg-pan-zoom_viewport')) {
classNames.push('svg-pan-zoom_viewport')
viewport.setAttribute('class', classNames.join(' '))
}
return viewport
}
/**
* Set SVG attributes
*
* @param {SVGSVGElement} svg
*/
, setupSvgAttributes: function(svg) {
// Setting default attributes
svg.setAttribute('xmlns', this.svgNS);
svg.setAttributeNS(this.xmlnsNS, 'xmlns:xlink', this.xlinkNS);
svg.setAttributeNS(this.xmlnsNS, 'xmlns:ev', this.evNS);
// Needed for Internet Explorer, otherwise the viewport overflows
if (svg.parentNode !== null) {
var style = svg.getAttribute('style') || '';
if (style.toLowerCase().indexOf('overflow') === -1) {
svg.setAttribute('style', 'overflow: hidden; ' + style);
}
}
}
/**
* How long Internet Explorer takes to finish updating its display (ms).
*/
, internetExplorerRedisplayInterval: 300
/**
* Forces the browser to redisplay all SVG elements that rely on an
* element defined in a 'defs' section. It works globally, for every
* available defs element on the page.
* The throttling is intentionally global.
*
  * This is only needed for IE. It is a hack to make markers (and 'use' elements?)
* visible after pan/zoom when there are multiple SVGs on the page.
* See bug report: https://connect.microsoft.com/IE/feedback/details/781964/
* also see svg-pan-zoom issue: https://github.com/ariutta/svg-pan-zoom/issues/62
*/
, refreshDefsGlobal: Utils.throttle(function() {
var allDefs = document.querySelectorAll('defs');
var allDefsCount = allDefs.length;
for (var i = 0; i < allDefsCount; i++) {
var thisDefs = allDefs[i];
thisDefs.parentNode.insertBefore(thisDefs, thisDefs);
}
}, this.internetExplorerRedisplayInterval)
/**
* Sets the current transform matrix of an element
*
* @param {SVGElement} element
* @param {SVGMatrix} matrix CTM
* @param {SVGElement} defs
*/
, setCTM: function(element, matrix, defs) {
var that = this
, s = 'matrix(' + matrix.a + ',' + matrix.b + ',' + matrix.c + ',' + matrix.d + ',' + matrix.e + ',' + matrix.f + ')';
element.setAttributeNS(null, 'transform', s);
if ('transform' in element.style) {
element.style.transform = s;
} else if ('-ms-transform' in element.style) {
element.style['-ms-transform'] = s;
} else if ('-webkit-transform' in element.style) {
element.style['-webkit-transform'] = s;
}
// IE has a bug that makes markers disappear on zoom (when the matrix "a" and/or "d" elements change)
// see http://stackoverflow.com/questions/17654578/svg-marker-does-not-work-in-ie9-10
// and http://srndolha.wordpress.com/2013/11/25/svg-line-markers-may-disappear-in-internet-explorer-11/
if (_browser === 'ie' && !!defs) {
// this refresh is intended for redisplaying the SVG during zooming
defs.parentNode.insertBefore(defs, defs);
// this refresh is intended for redisplaying the other SVGs on a page when panning a given SVG
// it is also needed for the given SVG itself, on zoomEnd, if the SVG contains any markers that
// are located under any other element(s).
window.setTimeout(function() {
that.refreshDefsGlobal();
}, that.internetExplorerRedisplayInterval);
}
}
/**
* Instantiate an SVGPoint object with given event coordinates
*
* @param {Event} evt
* @param {SVGSVGElement} svg
* @return {SVGPoint} point
*/
, getEventPoint: function(evt, svg) {
var point = svg.createSVGPoint()
Utils.mouseAndTouchNormalize(evt, svg)
point.x = evt.clientX
point.y = evt.clientY
return point
}
/**
* Get SVG center point
*
* @param {SVGSVGElement} svg
* @return {SVGPoint}
*/
, getSvgCenterPoint: function(svg, width, height) {
return this.createSVGPoint(svg, width / 2, height / 2)
}
/**
* Create a SVGPoint with given x and y
*
* @param {SVGSVGElement} svg
* @param {Number} x
* @param {Number} y
* @return {SVGPoint}
*/
, createSVGPoint: function(svg, x, y) {
var point = svg.createSVGPoint()
point.x = x
point.y = y
return point
}
}
},{"./utilities":7}],6:[function(require,module,exports){
// uniwheel 0.1.2 (customized)
// A unified cross browser mouse wheel event handler
// https://github.com/teemualap/uniwheel
module.exports = (function(){
//Full details: https://developer.mozilla.org/en-US/docs/Web/Reference/Events/wheel
var prefix = "", _addEventListener, _removeEventListener, onwheel, support, fns = [];
// detect event model
if ( window.addEventListener ) {
_addEventListener = "addEventListener";
_removeEventListener = "removeEventListener";
} else {
_addEventListener = "attachEvent";
_removeEventListener = "detachEvent";
prefix = "on";
}
// detect available wheel event
support = "onwheel" in document.createElement("div") ? "wheel" : // Modern browsers support "wheel"
document.onmousewheel !== undefined ? "mousewheel" : // Webkit and IE support at least "mousewheel"
"DOMMouseScroll"; // let's assume that remaining browsers are older Firefox
function createCallback(element,callback,capture) {
var fn = function(originalEvent) {
!originalEvent && ( originalEvent = window.event );
// create a normalized event object
var event = {
// keep a ref to the original event object
originalEvent: originalEvent,
target: originalEvent.target || originalEvent.srcElement,
type: "wheel",
deltaMode: originalEvent.type == "MozMousePixelScroll" ? 0 : 1,
deltaX: 0,
        deltaZ: 0,
preventDefault: function() {
originalEvent.preventDefault ?
originalEvent.preventDefault() :
originalEvent.returnValue = false;
}
};
// calculate deltaY (and deltaX) according to the event
if ( support == "mousewheel" ) {
event.deltaY = - 1/40 * originalEvent.wheelDelta;
        // Webkit also supports wheelDeltaX
originalEvent.wheelDeltaX && ( event.deltaX = - 1/40 * originalEvent.wheelDeltaX );
} else {
event.deltaY = originalEvent.detail;
}
// it's time to fire the callback
return callback( event );
};
fns.push({
element: element,
fn: fn,
capture: capture
});
return fn;
}
function getCallback(element,capture) {
for (var i = 0; i < fns.length; i++) {
if (fns[i].element === element && fns[i].capture === capture) {
return fns[i].fn;
}
}
return function(){};
}
function removeCallback(element,capture) {
for (var i = 0; i < fns.length; i++) {
if (fns[i].element === element && fns[i].capture === capture) {
return fns.splice(i,1);
}
}
}
function _addWheelListener( elem, eventName, callback, useCapture ) {
var cb;
if (support === "wheel") {
cb = callback;
} else {
cb = createCallback(elem,callback,useCapture);
}
elem[ _addEventListener ]( prefix + eventName, cb, useCapture || false );
}
function _removeWheelListener( elem, eventName, callback, useCapture ) {
var cb;
if (support === "wheel") {
cb = callback;
} else {
cb = getCallback(elem,useCapture);
}
elem[ _removeEventListener ]( prefix + eventName, cb, useCapture || false );
removeCallback(elem,useCapture);
}
function addWheelListener( elem, callback, useCapture ) {
_addWheelListener( elem, support, callback, useCapture );
// handle MozMousePixelScroll in older Firefox
if( support == "DOMMouseScroll" ) {
_addWheelListener( elem, "MozMousePixelScroll", callback, useCapture);
}
}
function removeWheelListener(elem,callback,useCapture){
_removeWheelListener(elem,support,callback,useCapture);
// handle MozMousePixelScroll in older Firefox
if( support == "DOMMouseScroll" ) {
_removeWheelListener(elem, "MozMousePixelScroll", callback, useCapture);
}
}
return {
on: addWheelListener,
off: removeWheelListener
};
})();
},{}],7:[function(require,module,exports){
module.exports = {
/**
* Extends an object
*
* @param {Object} target object to extend
* @param {Object} source object to take properties from
* @return {Object} extended object
*/
extend: function(target, source) {
target = target || {};
for (var prop in source) {
// Go recursively
if (this.isObject(source[prop])) {
target[prop] = this.extend(target[prop], source[prop])
} else {
target[prop] = source[prop]
}
}
return target;
}
/**
* Checks if an object is a DOM element
*
* @param {Object} o HTML element or String
* @return {Boolean} returns true if object is a DOM element
*/
, isElement: function(o){
return (
o instanceof HTMLElement || o instanceof SVGElement || o instanceof SVGSVGElement || //DOM2
(o && typeof o === 'object' && o !== null && o.nodeType === 1 && typeof o.nodeName === 'string')
);
}
/**
* Checks if an object is an Object
*
* @param {Object} o Object
* @return {Boolean} returns true if object is an Object
*/
, isObject: function(o){
return Object.prototype.toString.call(o) === '[object Object]';
}
/**
* Checks if variable is Number
*
* @param {Integer|Float} n
* @return {Boolean} returns true if variable is Number
*/
, isNumber: function(n) {
return !isNaN(parseFloat(n)) && isFinite(n);
}
/**
* Search for an SVG element
*
* @param {Object|String} elementOrSelector DOM Element or selector String
* @return {Object|Null} SVG or null
*/
, getSvg: function(elementOrSelector) {
var element
, svg;
if (!this.isElement(elementOrSelector)) {
// If selector provided
if (typeof elementOrSelector === 'string' || elementOrSelector instanceof String) {
// Try to find the element
element = document.querySelector(elementOrSelector)
if (!element) {
throw new Error('Provided selector did not find any elements. Selector: ' + elementOrSelector)
return null
}
} else {
throw new Error('Provided selector is not an HTML object nor String')
return null
}
} else {
element = elementOrSelector
}
if (element.tagName.toLowerCase() === 'svg') {
svg = element;
} else {
if (element.tagName.toLowerCase() === 'object') {
svg = element.contentDocument.documentElement;
} else {
if (element.tagName.toLowerCase() === 'embed') {
svg = element.getSVGDocument().documentElement;
} else {
if (element.tagName.toLowerCase() === 'img') {
throw new Error('Cannot script an SVG in an "img" element. Please use an "object" element or an in-line SVG.');
} else {
throw new Error('Cannot get SVG.');
}
return null
}
}
}
return svg
}
/**
* Attach a given context to a function
* @param {Function} fn Function
* @param {Object} context Context
* @return {Function} Function with certain context
*/
, proxy: function(fn, context) {
return function() {
return fn.apply(context, arguments)
}
}
/**
* Returns object type
* Uses toString that returns [object SVGPoint]
  * and then parses the object type from that string
*
* @param {Object} o Any object
* @return {String} Object type
*/
, getType: function(o) {
return Object.prototype.toString.apply(o).replace(/^\[object\s/, '').replace(/\]$/, '')
}
/**
  * If it is a touch event then add clientX and clientY to the event object
*
* @param {Event} evt
* @param {SVGSVGElement} svg
*/
, mouseAndTouchNormalize: function(evt, svg) {
// If no clientX then fallback
if (evt.clientX === void 0 || evt.clientX === null) {
// Fallback
evt.clientX = 0
evt.clientY = 0
// If it is a touch event
if (evt.touches !== void 0 && evt.touches.length) {
if (evt.touches[0].clientX !== void 0) {
evt.clientX = evt.touches[0].clientX
evt.clientY = evt.touches[0].clientY
} else if (evt.touches[0].pageX !== void 0) {
var rect = svg.getBoundingClientRect();
evt.clientX = evt.touches[0].pageX - rect.left
evt.clientY = evt.touches[0].pageY - rect.top
}
// If it is a custom event
} else if (evt.originalEvent !== void 0) {
if (evt.originalEvent.clientX !== void 0) {
evt.clientX = evt.originalEvent.clientX
evt.clientY = evt.originalEvent.clientY
}
}
}
}
/**
* Check if an event is a double click/tap
  * TODO: For touch gestures use a library (hammer.js) that takes into account other events
  * (touchmove and touchend). It should take into account tap duration and traveled distance
*
* @param {Event} evt
* @param {Event} prevEvt Previous Event
* @return {Boolean}
*/
, isDblClick: function(evt, prevEvt) {
// Double click detected by browser
if (evt.detail === 2) {
return true;
}
// Try to compare events
else if (prevEvt !== void 0 && prevEvt !== null) {
var timeStampDiff = evt.timeStamp - prevEvt.timeStamp // should be lower than 250 ms
, touchesDistance = Math.sqrt(Math.pow(evt.clientX - prevEvt.clientX, 2) + Math.pow(evt.clientY - prevEvt.clientY, 2))
return timeStampDiff < 250 && touchesDistance < 10
}
// Nothing found
return false;
}
/**
* Returns current timestamp as an integer
*
* @return {Number}
*/
, now: Date.now || function() {
return new Date().getTime();
}
// From underscore.
// Returns a function, that, when invoked, will only be triggered at most once
// during a given window of time. Normally, the throttled function will run
// as much as it can, without ever going more than once per `wait` duration;
// but if you'd like to disable the execution on the leading edge, pass
// `{leading: false}`. To disable execution on the trailing edge, ditto.
// jscs:disable
// jshint ignore:start
, throttle: function(func, wait, options) {
var that = this;
var context, args, result;
var timeout = null;
var previous = 0;
if (!options) options = {};
var later = function() {
previous = options.leading === false ? 0 : that.now();
timeout = null;
result = func.apply(context, args);
if (!timeout) context = args = null;
};
return function() {
var now = that.now();
if (!previous && options.leading === false) previous = now;
var remaining = wait - (now - previous);
context = this;
args = arguments;
if (remaining <= 0 || remaining > wait) {
clearTimeout(timeout);
timeout = null;
previous = now;
result = func.apply(context, args);
if (!timeout) context = args = null;
} else if (!timeout && options.trailing !== false) {
timeout = setTimeout(later, remaining);
}
return result;
};
}
// jshint ignore:end
// jscs:enable
/**
* Create a requestAnimationFrame simulation
*
* @param {Number|String} refreshRate
* @return {Function}
*/
, createRequestAnimationFrame: function(refreshRate) {
var timeout = null
// Convert refreshRate to timeout
if (refreshRate !== 'auto' && refreshRate < 60 && refreshRate > 1) {
timeout = Math.floor(1000 / refreshRate)
}
if (timeout === null) {
return window.requestAnimationFrame || requestTimeout(33)
} else {
return requestTimeout(timeout)
}
}
}
/**
* Create a callback that will execute after a given timeout
*
* @param {Function} timeout
* @return {Function}
*/
function requestTimeout(timeout) {
return function(callback) {
window.setTimeout(callback, timeout)
}
}
},{}]},{},[1]); | PypiClean |
/Django-Gtts-0.4.tar.gz/Django-Gtts-0.4/gTTS/templatetags/gTTS.py | from django import template
from django.conf import settings
try:
# Django 2
from django.contrib.staticfiles.templatetags.staticfiles import static
except ModuleNotFoundError:
# Django 3
from django.templatetags.static import static
from gtts import gTTS
from os import path, makedirs, remove
from datetime import datetime
from sys import version_info
from uuid import uuid4 as uuid
from ..models import Speech
cur_dir = path.join(path.dirname(path.abspath(__file__)), '..')
dir_name = 'gTTS'
temp_path = path.join(
cur_dir,
path.join(
getattr(settings, 'STATIC_URL', ' ')[1:],
dir_name
)
)
register = template.Library()
@register.simple_tag
def say(
language='en-us',
text='Flask says Hi!'):
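    # Generate (or reuse) a gTTS mp3 for `text` in `language`, cache it on disk under
    # STATIC_URL/gTTS and in the Speech model, then return the file's static URL.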
for h, a in {'language': language, 'text': text}.items():
if not isinstance(a, str): # check if receiving a string
raise(TypeError("gTTS.say(%s) takes string" % h))
try:
ext_file = Speech.objects.get(text=text, language=language)
        if not path.isfile(path.join(temp_path, ext_file.file_name)):
for file in Speech.objects.filter(
text=text, language=language).all():
file.delete()
ext_file = None
except Exception:
ext_file = None
if not path.isdir(temp_path): # creating temporary directory
makedirs(temp_path) if version_info.major == 2 else makedirs(
# makedirs in py2 missing exist_ok
temp_path, exist_ok=True
)
if ext_file is None:
s = gTTS(text) if language == 'skip' else gTTS(
text,
lang=language)
while True: # making sure audio file name is truly unique
fname = str(uuid()) + '.mp3'
abp_fname = path.join(temp_path, fname)
if not path.isfile(abp_fname):
break
Speech(text=text,
language=language,
file_name=fname).save()
s.save(abp_fname)
else:
fname = ext_file.file_name
return static('/'.join([dir_name, fname])) | PypiClean |
/Cryptonet-0.0.5.tar.gz/Cryptonet-0.0.5/cryptonet/utilities.py | import hashlib
import sys
import sha3
import time
import pprint as pprint_module
from binascii import hexlify, unhexlify
import cryptonet
from cryptonet.debug import debug
from cryptonet.errors import ChainError, ValidationError
#==============================================================================
# GENERAL CRYPTONET FUNCTIONS
#==============================================================================
def i2b(x):
"""
Take and integer and return bytes with no \x00 padding.
:param x: input integer
:return: bytes
"""
return x.to_bytes((x.bit_length() - 1) // 8 + 1, 'big')
def num2bits(n, minlen=0):
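    # Return the bits of n, most-significant bit first, left-padded with zeros
    # so the result is at least minlen bits long.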
n = int(n)
r = []
while n > 0:
r.append(n % 2)
n //= 2
pad = minlen - len(r)
while pad > 0:
r.append(0)
pad -= 1
return r[::-1]
def random_peer(p2p):
p2p.peers()
def global_hash(msg, length=None):
''' This is the hash function that should be used EVERYWHERE in GPDHT.
Currently defined to be SHA3.
Returns int, should accept int'''
s = hashlib.sha3_256()
if not isinstance(msg, int):
s.update(bytes(msg))
else:
        if length is None:
length = msg.bit_length() // 8 + 1
s.update(msg.to_bytes(length, 'big'))
return int.from_bytes(s.digest(), 'big')
def dsha256R(msg):
''' Return a dsha256 hash reversed
'''
return dsha256(msg)[::-1]
def dsha256(msg):
''' Input should be bytes
'''
return sha256(sha256(msg))
def sha256(msg):
s = hashlib.sha256()
s.update(msg)
return s.digest()
def _split_varint_and_bytes(int_location, bytes):
return (bytes[int_location[0]:int_location[1]], bytes[int_location[1]:])
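# Bitcoin-style varint: a leading byte below 0xfd is the value itself, while
# 0xfd, 0xfe and 0xff prefix 2-, 4- and 8-byte integers respectively.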
def get_varint_and_remainder(bytes):
if bytes[0] < 0xfd:
return _split_varint_and_bytes((0, 1), bytes)
if bytes[0] == 0xfd:
return _split_varint_and_bytes((1, 3), bytes)
if bytes[0] == 0xfe:
return _split_varint_and_bytes((1, 5), bytes)
if bytes[0] == 0xff:
return _split_varint_and_bytes((1, 9), bytes)
time_as_int = lambda: int(time.time())
def create_index(labels):
# starts at 1
    return dict(zip(labels, [i+1 for i in range(len(labels))]))
pp = pprint_module.PrettyPrinter(indent=4)
def pretty_string(obj):
return pp.pformat(obj) | PypiClean |
/Beeswarm-0.7.18.tar.gz/Beeswarm-0.7.18/beeswarm/drones/honeypot/capabilities/ssh.py |
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
from telnetsrv.paramiko_ssh import SSHHandler, TelnetToPtyHandler
from paramiko import RSAKey
from paramiko.ssh_exception import SSHException
from beeswarm.drones.honeypot.capabilities.handlerbase import HandlerBase
from beeswarm.drones.honeypot.capabilities.shared.shell import Commands
logger = logging.getLogger(__name__)
class SSH(HandlerBase):
def __init__(self, options, work_dir, key='server.key'):
logging.getLogger("telnetsrv.paramiko_ssh ").setLevel(logging.WARNING)
logging.getLogger("paramiko").setLevel(logging.WARNING)
self.key = os.path.join(work_dir, key)
super(SSH, self).__init__(options, work_dir)
def handle_session(self, gsocket, address):
session = self.create_session(address)
try:
SshWrapper(address, None, gsocket, session, self.options, self.vfsystem, self.key)
except (SSHException, EOFError) as ex:
logger.debug('Unexpected end of ssh session: {0}. ({1})'.format(ex, session.id))
finally:
self.close_session(session)
class BeeTelnetHandler(Commands):
def __init__(self, request, client_address, server, vfs, session):
Commands.__init__(self, request, client_address, server, vfs, session)
class SshWrapper(SSHHandler):
"""
Wraps the telnetsrv paramiko module to fit the Honeypot architecture.
"""
WELCOME = '...'
HOSTNAME = 'host'
PROMPT = None
telnet_handler = BeeTelnetHandler
def __init__(self, client_address, server, socket, session, options, vfs, key):
self.session = session
self.auth_count = 0
self.vfs = vfs
self.working_dir = None
self.username = None
SshWrapper.host_key = RSAKey(filename=key)
request = SshWrapper.dummy_request()
request._sock = socket
SSHHandler.__init__(self, request, client_address, server)
class __MixedPtyHandler(TelnetToPtyHandler, BeeTelnetHandler):
# BaseRequestHandler does not inherit from object, must call the __init__ directly
def __init__(self, *args):
TelnetToPtyHandler.__init__(self, *args)
self.pty_handler = __MixedPtyHandler
def authCallbackUsername(self, username):
# make sure no one can logon
raise
def authCallback(self, username, password):
self.session.activity()
if self.session.try_auth('plaintext', username=username, password=password):
self.working_dir = '/'
self.username = username
self.telnet_handler.PROMPT = '[{0}@{1} {2}]$ '.format(self.username, self.HOSTNAME, self.working_dir)
return True
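        # Credentials not accepted for this session: the bare raise propagates an
        # exception so the SSH layer reports the attempt as a failed authentication.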
raise
def finish(self):
self.session.end_session()
def setup(self):
self.transport.load_server_moduli()
self.transport.add_server_key(self.host_key)
self.transport.start_server(server=self)
while True:
channel = self.transport.accept(20)
if channel is None:
# check to see if any thread is running
any_running = False
for _, thread in self.channels.items():
if thread.is_alive():
any_running = True
break
if not any_running:
break
def start_pty_request(self, channel, term, modes):
"""Start a PTY - intended to run it a (green)thread."""
request = self.dummy_request()
request._sock = channel
request.modes = modes
request.term = term
request.username = self.username
# This should block until the user quits the pty
self.pty_handler(request, self.client_address, self.tcp_server, self.vfs, self.session)
# Shutdown the entire session
self.transport.close() | PypiClean |
/OBITools-1.2.13.tar.gz/OBITools-1.2.13/src/obitools/options/bioseqcutter.py | from logging import debug
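# The two option callbacks below build closures that evaluate the user-supplied
# Python expression with the sequence record (and, when set, the taxonomy) as
# its namespace in order to compute the begin/end cut positions.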
def _beginOptionCallback(options,opt,value,parser):
def beginCutPosition(seq):
debug("begin = %s" % value )
if hasattr(options, 'taxonomy') and options.taxonomy is not None:
environ = {'taxonomy' : options.taxonomy,'sequence':seq}
else:
environ = {'sequence':seq}
return eval(value,environ,seq) - 1
parser.values.beginCutPosition=beginCutPosition
def _endOptionCallback(options,opt,value,parser):
def endCutPosition(seq):
if hasattr(options, 'taxonomy') and options.taxonomy is not None:
environ = {'taxonomy' : options.taxonomy,'sequence':seq}
else:
environ = {'sequence':seq}
return eval(value,environ,seq)
parser.values.endCutPosition=endCutPosition
def addSequenceCuttingOptions(optionManager):
group = optionManager.add_option_group('Cutting options')
group.add_option('-b','--begin',
action="callback", callback=_beginOptionCallback,
metavar="<PYTHON_EXPRESSION>",
type="string",
help="python expression to be evaluated in the "
"sequence context. The attribute name can be "
"used in the expression as variable name. "
"An extra variable named 'sequence' refers "
"to the sequence object itself. ")
group.add_option('-e','--end',
action="callback", callback=_endOptionCallback,
metavar="<PYTHON_EXPRESSION>",
type="string",
help="python expression to be evaluated in the "
"sequence context. The attribute name can be "
"used in the expression as variable name ."
"An extra variable named 'sequence' refers"
"to the sequence object itself. ")
def cutterGenerator(options):
def sequenceCutter(seq):
lseq = len(seq)
if hasattr(options, 'beginCutPosition'):
begin = int(options.beginCutPosition(seq))
else:
begin = 0
if hasattr(options, 'endCutPosition'):
end = int(options.endCutPosition(seq))
else:
end = lseq
if begin > 0 or end < lseq:
seq = seq[begin:end]
seq['subsequence']="%d..%d" % (begin+1,end)
return seq
return sequenceCutter
def cutterIteratorGenerator(options):
_cutter = cutterGenerator(options)
def sequenceCutterIterator(seqIterator):
for seq in seqIterator:
yield _cutter(seq)
return sequenceCutterIterator | PypiClean |
/CephQeSdk-1.0.0.tar.gz/CephQeSdk-1.0.0/src/RhcsQeSdk/core/cli/radosgw_admin/role.py | import logging
from copy import deepcopy
import RhcsQeSdk.core.cli.fabfile as fabfile
from RhcsQeSdk.core.utilities import core_utils
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(message)s"
)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
stream_handler.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
class Role:
"""
This module provides CLI interface to manage role operations.
"""
def __init__(self, base_cmd):
self.base_cmd = base_cmd + " role"
def create(self, **kw):
"""Create a new AWS role for use with STS.
Args:
            kw(dict): Key/value pairs that need to be provided to the installer
Example::
Supported keys:
role-name(str): takes name of the role
path(str): path to role. The default value is a slash(/).(optioal)
assume-role-policy-doc(str): The trust relationship policy document that grants an
entity permission to assume the role.(optional)
Returns:
Dict(str)
A mapping of host strings to the given task’s return value for that host’s execution run
"""
kw = kw.get("kw")
kw_copy = deepcopy(kw)
role_name = kw_copy.pop("role-name", "")
cmd = (
self.base_cmd
+ f" create --role-name={role_name}"
+ core_utils.build_cmd_args(kw=kw_copy)
)
logger.info(f"Running command {cmd}")
return fabfile.run_command(cmd, config=kw.get("env_config"))
def rm(self, **kw):
"""Remove a role.
Args:
            kw(dict): Key/value pairs that need to be provided to the installer
Example::
Supported keys:
role-name(str): takes name of the role
Returns:
Dict(str)
A mapping of host strings to the given task’s return value for that host’s execution run
"""
kw = kw.get("kw")
role_name = kw.get("role-name")
cmd = self.base_cmd + f" rm --role-name={role_name}"
logger.info(f"Running command {cmd}")
return fabfile.run_command(cmd, config=kw.get("env_config"))
def get_(self, **kw):
"""Get a role.
Args:
            kw(dict): Key/value pairs that need to be provided to the installer
Example::
Supported keys:
role-name(str): takes name of the role
Returns:
Dict(str)
A mapping of host strings to the given task’s return value for that host’s execution run
"""
kw = kw.get("kw")
role_name = kw.get("role-name")
cmd = self.base_cmd + f" get --role-name={role_name}"
logger.info(f"Running command {cmd}")
return fabfile.run_command(cmd, config=kw.get("env_config"))
def list_(self, **kw):
"""List the roles with specified path prefix.
Args:
            kw(dict): Key/value pairs that need to be provided to the installer
        Example::
Supported keys:
path-prefix(str): Path prefix for filtering roles. If this is not specified,
all roles are listed.(optional)
Returns:
Dict(str)
A mapping of host strings to the given task’s return value for that host’s execution run
"""
kw = kw.get("kw")
cmd = self.base_cmd + " list" + core_utils.build_cmd_args(kw=kw)
logger.info(f"Running command {cmd}")
return fabfile.run_command(cmd, config=kw.get("env_config"))
def modify(self, **kw):
"""Modify the assume role policy of an existing role.
Args:
            kw(dict): Key/value pairs that need to be provided to the installer
Example::
Supported keys:
role-name(str): takes name of the role
assume-role-policy-doc(str): The trust relationship policy document that grants an
entity permission to assume the role.
Returns:
Dict(str)
A mapping of host strings to the given task’s return value for that host’s execution run
"""
kw = kw.get("kw")
role_name = kw.get("role-name")
trust_policy_document = kw.get("assume-role-policy-doc")
cmd = (
self.base_cmd
+ f" modify --role-name={role_name} --assume-role-policy-doc={trust_policy_document}"
)
logger.info(f"Running command {cmd}")
return fabfile.run_command(cmd, config=kw.get("env_config")) | PypiClean |
/LiPD-0.2.8.9.tar.gz/LiPD-0.2.8.9/docs/_build/html/_static/doctools.js | * select a different prefix for underscore
*/
$u = _.noConflict();
/**
* make the code below compatible with browsers without
* an installed firebug like debugger
if (!window.console || !console.firebug) {
var names = ["log", "debug", "info", "warn", "error", "assert", "dir",
"dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace",
"profile", "profileEnd"];
window.console = {};
for (var i = 0; i < names.length; ++i)
window.console[names[i]] = function() {};
}
*/
/**
* small helper function to urldecode strings
*/
jQuery.urldecode = function(x) {
return decodeURIComponent(x).replace(/\+/g, ' ');
};
/**
* small helper function to urlencode strings
*/
jQuery.urlencode = encodeURIComponent;
/**
* This function returns the parsed url parameters of the
* current request. Multiple values per key are supported,
* it will always return arrays of strings for the value parts.
*/
jQuery.getQueryParameters = function(s) {
if (typeof s == 'undefined')
s = document.location.search;
var parts = s.substr(s.indexOf('?') + 1).split('&');
var result = {};
for (var i = 0; i < parts.length; i++) {
var tmp = parts[i].split('=', 2);
var key = jQuery.urldecode(tmp[0]);
var value = jQuery.urldecode(tmp[1]);
if (key in result)
result[key].push(value);
else
result[key] = [value];
}
return result;
};
/**
* highlight a given string on a jquery object by wrapping it in
* span elements with the given class name.
*/
jQuery.fn.highlightText = function(text, className) {
function highlight(node) {
if (node.nodeType == 3) {
var val = node.nodeValue;
var pos = val.toLowerCase().indexOf(text);
if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) {
var span = document.createElement("span");
span.className = className;
span.appendChild(document.createTextNode(val.substr(pos, text.length)));
node.parentNode.insertBefore(span, node.parentNode.insertBefore(
document.createTextNode(val.substr(pos + text.length)),
node.nextSibling));
node.nodeValue = val.substr(0, pos);
}
}
else if (!jQuery(node).is("button, select, textarea")) {
jQuery.each(node.childNodes, function() {
highlight(this);
});
}
}
return this.each(function() {
highlight(this);
});
};
/*
* backward compatibility for jQuery.browser
* This will be supported until firefox bug is fixed.
*/
if (!jQuery.browser) {
jQuery.uaMatch = function(ua) {
ua = ua.toLowerCase();
var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
/(webkit)[ \/]([\w.]+)/.exec(ua) ||
/(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
/(msie) ([\w.]+)/.exec(ua) ||
ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
[];
return {
browser: match[ 1 ] || "",
version: match[ 2 ] || "0"
};
};
jQuery.browser = {};
jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
}
/**
* Small JavaScript module for the documentation.
*/
var Documentation = {
init : function() {
this.fixFirefoxAnchorBug();
this.highlightSearchWords();
this.initIndexTable();
},
/**
* i18n support
*/
TRANSLATIONS : {},
PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; },
LOCALE : 'unknown',
// gettext and ngettext don't access this so that the functions
  // can safely be bound to a different name (_ = Documentation.gettext)
gettext : function(string) {
var translated = Documentation.TRANSLATIONS[string];
if (typeof translated == 'undefined')
return string;
return (typeof translated == 'string') ? translated : translated[0];
},
ngettext : function(singular, plural, n) {
var translated = Documentation.TRANSLATIONS[singular];
if (typeof translated == 'undefined')
return (n == 1) ? singular : plural;
    return translated[Documentation.PLURAL_EXPR(n)];
},
addTranslations : function(catalog) {
for (var key in catalog.messages)
this.TRANSLATIONS[key] = catalog.messages[key];
this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
this.LOCALE = catalog.locale;
},
/**
* add context elements like header anchor links
*/
addContextElements : function() {
$('div[id] > :header:first').each(function() {
$('<a class="headerlink">\u00B6</a>').
attr('href', '#' + this.id).
attr('title', _('Permalink to this headline')).
appendTo(this);
});
$('dt[id]').each(function() {
$('<a class="headerlink">\u00B6</a>').
attr('href', '#' + this.id).
attr('title', _('Permalink to this definition')).
appendTo(this);
});
},
/**
* workaround a firefox stupidity
* see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075
*/
fixFirefoxAnchorBug : function() {
if (document.location.hash)
window.setTimeout(function() {
document.location.href += '';
}, 10);
},
/**
* highlight the search words provided in the url in the text
*/
highlightSearchWords : function() {
var params = $.getQueryParameters();
var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
if (terms.length) {
var body = $('div.body');
if (!body.length) {
body = $('body');
}
window.setTimeout(function() {
$.each(terms, function() {
body.highlightText(this.toLowerCase(), 'highlighted');
});
}, 10);
$('<p class="highlight-link"><a href="javascript:Documentation.' +
'hideSearchWords()">' + _('Hide Search Matches') + '</a></p>')
.appendTo($('#searchbox'));
}
},
/**
* init the domain index toggle buttons
*/
initIndexTable : function() {
var togglers = $('img.toggler').click(function() {
var src = $(this).attr('src');
var idnum = $(this).attr('id').substr(7);
$('tr.cg-' + idnum).toggle();
if (src.substr(-9) == 'minus.png')
$(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
else
$(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
}).css('display', '');
if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
togglers.click();
}
},
/**
* helper function to hide the search marks again
*/
hideSearchWords : function() {
$('#searchbox .highlight-link').fadeOut(300);
$('span.highlighted').removeClass('highlighted');
},
/**
* make the url absolute
*/
makeURL : function(relativeURL) {
return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
},
/**
* get the current relative url
*/
getCurrentURL : function() {
var path = document.location.pathname;
var parts = path.split(/\//);
$.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
if (this == '..')
parts.pop();
});
var url = parts.join('/');
return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
},
initOnKeyListeners: function() {
$(document).keyup(function(event) {
var activeElementType = document.activeElement.tagName;
// don't navigate when in search box or textarea
if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') {
switch (event.keyCode) {
case 37: // left
var prevHref = $('link[rel="prev"]').prop('href');
if (prevHref) {
window.location.href = prevHref;
return false;
}
case 39: // right
var nextHref = $('link[rel="next"]').prop('href');
if (nextHref) {
window.location.href = nextHref;
return false;
}
}
}
});
}
};
// quick alias for translations
_ = Documentation.gettext;
$(document).ready(function() {
Documentation.init();
}); | PypiClean |
/NuInfoSys-1.0.0.tar.gz/NuInfoSys-1.0.0/README.md | # NuInfoSys
currently INCOMPLETE -- kekw malding
## Future Goals
* Custom Images (DOTS files)
* Custom Animations (Chaining DOTS files)
* Image + Animation Designer Web App / Software
* A way to know if the current animation list would require too much
memory would be really cool, but would take some investigation
## Codestyle
I have decided to commit to EXTREMELY STRICT typing for this project,
in order to avoid confusion completely and absolutely. You MUST follow
these rules or I won't even look at your code:
* ALL assignments have explicit types
* ALL RE-assignments have explicit types
* ALL functions have return types
* ALL function parameters have types
* Follow any other typing conventions you have heard of
If you follow all these rules and there is still something weird in
your code I will mention it in the review.
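For illustration, here is a minimal sketch of code following these rules (a hypothetical
example, not actual NuInfoSys code):

```python
from typing import List

def banner_lengths(messages: List[str]) -> List[int]:
    """Return the length of each message, with every assignment explicitly typed."""
    lengths: List[int] = []
    for message in messages:
        size: int = len(message)
        lengths.append(size)
    return lengths

result: List[int] = banner_lengths(["HELLO", "WORLD"])
```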
## Running from the CLI
In order to run NuInfoSys from the command line, use the following
command:
```python3.6 -m NuInfoSys.MODULE```
Usually MODULE will be betabrite (e.g. ```python3.6 -m NuInfoSys.betabrite```).
## Current Goals, in Rank of Needed Completion
* Memory management
| PypiClean |
/JATA-0.3.5-py3-none-any.whl/pixplot/web/assets/vendor/dist/tweenlite.min.js | !function(a,b){"use strict";var c={},d=a.document,e=a.GreenSockGlobals=a.GreenSockGlobals||a,f=e[b];if(f)return"undefined"!=typeof module&&module.exports&&(module.exports=f),f;var g,h,i,j,k,l=function(a){var b,c=a.split("."),d=e;for(b=0;b<c.length;b++)d[c[b]]=d=d[c[b]]||{};return d},m=l("com.greensock"),n=1e-10,o=function(a){var b,c=[],d=a.length;for(b=0;b!==d;c.push(a[b++]));return c},p=function(){},q=function(){var a=Object.prototype.toString,b=a.call([]);return function(c){return null!=c&&(c instanceof Array||"object"==typeof c&&!!c.push&&a.call(c)===b)}}(),r={},s=function(d,f,g,h){this.sc=r[d]?r[d].sc:[],r[d]=this,this.gsClass=null,this.func=g;var i=[];this.check=function(j){for(var k,m,n,o,p=f.length,q=p;--p>-1;)(k=r[f[p]]||new s(f[p],[])).gsClass?(i[p]=k.gsClass,q--):j&&k.sc.push(this);if(0===q&&g){if(m=("com.greensock."+d).split("."),n=m.pop(),o=l(m.join("."))[n]=this.gsClass=g.apply(g,i),h)if(e[n]=c[n]=o,"undefined"!=typeof module&&module.exports)if(d===b){module.exports=c[b]=o;for(p in c)o[p]=c[p]}else c[b]&&(c[b][n]=o);else"function"==typeof define&&define.amd&&define((a.GreenSockAMDPath?a.GreenSockAMDPath+"/":"")+d.split(".").pop(),[],function(){return o});for(p=0;p<this.sc.length;p++)this.sc[p].check()}},this.check(!0)},t=a._gsDefine=function(a,b,c,d){return new s(a,b,c,d)},u=m._class=function(a,b,c){return b=b||function(){},t(a,[],function(){return b},c),b};t.globals=e;var v=[0,0,1,1],w=u("easing.Ease",function(a,b,c,d){this._func=a,this._type=c||0,this._power=d||0,this._params=b?v.concat(b):v},!0),x=w.map={},y=w.register=function(a,b,c,d){for(var e,f,g,h,i=b.split(","),j=i.length,k=(c||"easeIn,easeOut,easeInOut").split(",");--j>-1;)for(f=i[j],e=d?u("easing."+f,null,!0):m.easing[f]||{},g=k.length;--g>-1;)h=k[g],x[f+"."+h]=x[h+f]=e[h]=a.getRatio?a:a[h]||new a};for(i=w.prototype,i._calcEnd=!1,i.getRatio=function(a){if(this._func)return this._params[0]=a,this._func.apply(null,this._params);var b=this._type,c=this._power,d=1===b?1-a:2===b?a:.5>a?2*a:2*(1-a);return 1===c?d*=d:2===c?d*=d*d:3===c?d*=d*d*d:4===c&&(d*=d*d*d*d),1===b?1-d:2===b?d:.5>a?d/2:1-d/2},g=["Linear","Quad","Cubic","Quart","Quint,Strong"],h=g.length;--h>-1;)i=g[h]+",Power"+h,y(new w(null,null,1,h),i,"easeOut",!0),y(new w(null,null,2,h),i,"easeIn"+(0===h?",easeNone":"")),y(new w(null,null,3,h),i,"easeInOut");x.linear=m.easing.Linear.easeIn,x.swing=m.easing.Quad.easeInOut;var z=u("events.EventDispatcher",function(a){this._listeners={},this._eventTarget=a||this});i=z.prototype,i.addEventListener=function(a,b,c,d,e){e=e||0;var f,g,h=this._listeners[a],i=0;for(this!==j||k||j.wake(),null==h&&(this._listeners[a]=h=[]),g=h.length;--g>-1;)f=h[g],f.c===b&&f.s===c?h.splice(g,1):0===i&&f.pr<e&&(i=g+1);h.splice(i,0,{c:b,s:c,up:d,pr:e})},i.removeEventListener=function(a,b){var c,d=this._listeners[a];if(d)for(c=d.length;--c>-1;)if(d[c].c===b)return void d.splice(c,1)},i.dispatchEvent=function(a){var b,c,d,e=this._listeners[a];if(e)for(b=e.length,b>1&&(e=e.slice(0)),c=this._eventTarget;--b>-1;)d=e[b],d&&(d.up?d.c.call(d.s||c,{type:a,target:c}):d.c.call(d.s||c))};var A=a.requestAnimationFrame,B=a.cancelAnimationFrame,C=Date.now||function(){return(new Date).getTime()},D=C();for(g=["ms","moz","webkit","o"],h=g.length;--h>-1&&!A;)A=a[g[h]+"RequestAnimationFrame"],B=a[g[h]+"CancelAnimationFrame"]||a[g[h]+"CancelRequestAnimationFrame"];u("Ticker",function(a,b){var c,e,f,g,h,i=this,l=C(),m=b!==!1&&A?"auto":!1,o=500,q=33,r="tick",s=function(a){var 
b,d,j=C()-D;j>o&&(l+=j-q),D+=j,i.time=(D-l)/1e3,b=i.time-h,(!c||b>0||a===!0)&&(i.frame++,h+=b+(b>=g?.004:g-b),d=!0),a!==!0&&(f=e(s)),d&&i.dispatchEvent(r)};z.call(i),i.time=i.frame=0,i.tick=function(){s(!0)},i.lagSmoothing=function(a,b){return arguments.length?(o=a||1/n,void(q=Math.min(b,o,0))):1/n>o},i.sleep=function(){null!=f&&(m&&B?B(f):clearTimeout(f),e=p,f=null,i===j&&(k=!1))},i.wake=function(a){null!==f?i.sleep():a?l+=-D+(D=C()):i.frame>10&&(D=C()-o+5),e=0===c?p:m&&A?A:function(a){return setTimeout(a,1e3*(h-i.time)+1|0)},i===j&&(k=!0),s(2)},i.fps=function(a){return arguments.length?(c=a,g=1/(c||60),h=this.time+g,void i.wake()):c},i.useRAF=function(a){return arguments.length?(i.sleep(),m=a,void i.fps(c)):m},i.fps(a),setTimeout(function(){"auto"===m&&i.frame<5&&"hidden"!==(d||{}).visibilityState&&i.useRAF(!1)},1500)}),i=m.Ticker.prototype=new m.events.EventDispatcher,i.constructor=m.Ticker;var E=u("core.Animation",function(a,b){if(this.vars=b=b||{},this._duration=this._totalDuration=a||0,this._delay=Number(b.delay)||0,this._timeScale=1,this._active=b.immediateRender===!0,this.data=b.data,this._reversed=b.reversed===!0,Y){k||j.wake();var c=this.vars.useFrames?X:Y;c.add(this,c._time),this.vars.paused&&this.paused(!0)}});j=E.ticker=new m.Ticker,i=E.prototype,i._dirty=i._gc=i._initted=i._paused=!1,i._totalTime=i._time=0,i._rawPrevTime=-1,i._next=i._last=i._onUpdate=i._timeline=i.timeline=null,i._paused=!1;var F=function(){k&&C()-D>2e3&&("hidden"!==(d||{}).visibilityState||!j.lagSmoothing())&&j.wake();var a=setTimeout(F,2e3);a.unref&&a.unref()};F(),i.play=function(a,b){return null!=a&&this.seek(a,b),this.reversed(!1).paused(!1)},i.pause=function(a,b){return null!=a&&this.seek(a,b),this.paused(!0)},i.resume=function(a,b){return null!=a&&this.seek(a,b),this.paused(!1)},i.seek=function(a,b){return this.totalTime(Number(a),b!==!1)},i.restart=function(a,b){return this.reversed(!1).paused(!1).totalTime(a?-this._delay:0,b!==!1,!0)},i.reverse=function(a,b){return null!=a&&this.seek(a||this.totalDuration(),b),this.reversed(!0).paused(!1)},i.render=function(a,b,c){},i.invalidate=function(){return this._time=this._totalTime=0,this._initted=this._gc=!1,this._rawPrevTime=-1,(this._gc||!this.timeline)&&this._enabled(!0),this},i.isActive=function(){var a,b=this._timeline,c=this._startTime;return!b||!this._gc&&!this._paused&&b.isActive()&&(a=b.rawTime(!0))>=c&&a<c+this.totalDuration()/this._timeScale-1e-7},i._enabled=function(a,b){return k||j.wake(),this._gc=!a,this._active=this.isActive(),b!==!0&&(a&&!this.timeline?this._timeline.add(this,this._startTime-this._delay):!a&&this.timeline&&this._timeline._remove(this,!0)),!1},i._kill=function(a,b){return this._enabled(!1,!1)},i.kill=function(a,b){return this._kill(a,b),this},i._uncache=function(a){for(var b=a?this:this.timeline;b;)b._dirty=!0,b=b.timeline;return this},i._swapSelfInParams=function(a){for(var b=a.length,c=a.concat();--b>-1;)"{self}"===a[b]&&(c[b]=this);return c},i._callback=function(a){var b=this.vars,c=b[a],d=b[a+"Params"],e=b[a+"Scope"]||b.callbackScope||this,f=d?d.length:0;switch(f){case 0:c.call(e);break;case 1:c.call(e,d[0]);break;case 2:c.call(e,d[0],d[1]);break;default:c.apply(e,d)}},i.eventCallback=function(a,b,c,d){if("on"===(a||"").substr(0,2)){var e=this.vars;if(1===arguments.length)return e[a];null==b?delete e[a]:(e[a]=b,e[a+"Params"]=q(c)&&-1!==c.join("").indexOf("{self}")?this._swapSelfInParams(c):c,e[a+"Scope"]=d),"onUpdate"===a&&(this._onUpdate=b)}return this},i.delay=function(a){return 
arguments.length?(this._timeline.smoothChildTiming&&this.startTime(this._startTime+a-this._delay),this._delay=a,this):this._delay},i.duration=function(a){return arguments.length?(this._duration=this._totalDuration=a,this._uncache(!0),this._timeline.smoothChildTiming&&this._time>0&&this._time<this._duration&&0!==a&&this.totalTime(this._totalTime*(a/this._duration),!0),this):(this._dirty=!1,this._duration)},i.totalDuration=function(a){return this._dirty=!1,arguments.length?this.duration(a):this._totalDuration},i.time=function(a,b){return arguments.length?(this._dirty&&this.totalDuration(),this.totalTime(a>this._duration?this._duration:a,b)):this._time},i.totalTime=function(a,b,c){if(k||j.wake(),!arguments.length)return this._totalTime;if(this._timeline){if(0>a&&!c&&(a+=this.totalDuration()),this._timeline.smoothChildTiming){this._dirty&&this.totalDuration();var d=this._totalDuration,e=this._timeline;if(a>d&&!c&&(a=d),this._startTime=(this._paused?this._pauseTime:e._time)-(this._reversed?d-a:a)/this._timeScale,e._dirty||this._uncache(!1),e._timeline)for(;e._timeline;)e._timeline._time!==(e._startTime+e._totalTime)/e._timeScale&&e.totalTime(e._totalTime,!0),e=e._timeline}this._gc&&this._enabled(!0,!1),(this._totalTime!==a||0===this._duration)&&(K.length&&$(),this.render(a,b,!1),K.length&&$())}return this},i.progress=i.totalProgress=function(a,b){var c=this.duration();return arguments.length?this.totalTime(c*a,b):c?this._time/c:this.ratio},i.startTime=function(a){return arguments.length?(a!==this._startTime&&(this._startTime=a,this.timeline&&this.timeline._sortChildren&&this.timeline.add(this,a-this._delay)),this):this._startTime},i.endTime=function(a){return this._startTime+(0!=a?this.totalDuration():this.duration())/this._timeScale},i.timeScale=function(a){if(!arguments.length)return this._timeScale;var b,c;for(a=a||n,this._timeline&&this._timeline.smoothChildTiming&&(b=this._pauseTime,c=b||0===b?b:this._timeline.totalTime(),this._startTime=c-(c-this._startTime)*this._timeScale/a),this._timeScale=a,c=this.timeline;c&&c.timeline;)c._dirty=!0,c.totalDuration(),c=c.timeline;return this},i.reversed=function(a){return arguments.length?(a!=this._reversed&&(this._reversed=a,this.totalTime(this._timeline&&!this._timeline.smoothChildTiming?this.totalDuration()-this._totalTime:this._totalTime,!0)),this):this._reversed},i.paused=function(a){if(!arguments.length)return this._paused;var b,c,d=this._timeline;return a!=this._paused&&d&&(k||a||j.wake(),b=d.rawTime(),c=b-this._pauseTime,!a&&d.smoothChildTiming&&(this._startTime+=c,this._uncache(!1)),this._pauseTime=a?b:null,this._paused=a,this._active=this.isActive(),!a&&0!==c&&this._initted&&this.duration()&&(b=d.smoothChildTiming?this._totalTime:(b-this._startTime)/this._timeScale,this.render(b,b===this._totalTime,!0))),this._gc&&!a&&this._enabled(!0,!1),this};var G=u("core.SimpleTimeline",function(a){E.call(this,0,a),this.autoRemoveChildren=this.smoothChildTiming=!0});i=G.prototype=new E,i.constructor=G,i.kill()._gc=!1,i._first=i._last=i._recent=null,i._sortChildren=!1,i.add=i.insert=function(a,b,c,d){var e,f;if(a._startTime=Number(b||0)+a._delay,a._paused&&this!==a._timeline&&(a._pauseTime=this.rawTime()-(a._timeline.rawTime()-a._pauseTime)),a.timeline&&a.timeline._remove(a,!0),a.timeline=a._timeline=this,a._gc&&a._enabled(!0,!0),e=this._last,this._sortChildren)for(f=a._startTime;e&&e._startTime>f;)e=e._prev;return 
e?(a._next=e._next,e._next=a):(a._next=this._first,this._first=a),a._next?a._next._prev=a:this._last=a,a._prev=e,this._recent=a,this._timeline&&this._uncache(!0),this},i._remove=function(a,b){return a.timeline===this&&(b||a._enabled(!1,!0),a._prev?a._prev._next=a._next:this._first===a&&(this._first=a._next),a._next?a._next._prev=a._prev:this._last===a&&(this._last=a._prev),a._next=a._prev=a.timeline=null,a===this._recent&&(this._recent=this._last),this._timeline&&this._uncache(!0)),this},i.render=function(a,b,c){var d,e=this._first;for(this._totalTime=this._time=this._rawPrevTime=a;e;)d=e._next,(e._active||a>=e._startTime&&!e._paused&&!e._gc)&&(e._reversed?e.render((e._dirty?e.totalDuration():e._totalDuration)-(a-e._startTime)*e._timeScale,b,c):e.render((a-e._startTime)*e._timeScale,b,c)),e=d},i.rawTime=function(){return k||j.wake(),this._totalTime};var H=u("TweenLite",function(b,c,d){if(E.call(this,c,d),this.render=H.prototype.render,null==b)throw"Cannot tween a null target.";this.target=b="string"!=typeof b?b:H.selector(b)||b;var e,f,g,h=b.jquery||b.length&&b!==a&&b[0]&&(b[0]===a||b[0].nodeType&&b[0].style&&!b.nodeType),i=this.vars.overwrite;if(this._overwrite=i=null==i?W[H.defaultOverwrite]:"number"==typeof i?i>>0:W[i],(h||b instanceof Array||b.push&&q(b))&&"number"!=typeof b[0])for(this._targets=g=o(b),this._propLookup=[],this._siblings=[],e=0;e<g.length;e++)f=g[e],f?"string"!=typeof f?f.length&&f!==a&&f[0]&&(f[0]===a||f[0].nodeType&&f[0].style&&!f.nodeType)?(g.splice(e--,1),this._targets=g=g.concat(o(f))):(this._siblings[e]=_(f,this,!1),1===i&&this._siblings[e].length>1&&ba(f,this,null,1,this._siblings[e])):(f=g[e--]=H.selector(f),"string"==typeof f&&g.splice(e+1,1)):g.splice(e--,1);else this._propLookup={},this._siblings=_(b,this,!1),1===i&&this._siblings.length>1&&ba(b,this,null,1,this._siblings);(this.vars.immediateRender||0===c&&0===this._delay&&this.vars.immediateRender!==!1)&&(this._time=-n,this.render(Math.min(0,-this._delay)))},!0),I=function(b){return b&&b.length&&b!==a&&b[0]&&(b[0]===a||b[0].nodeType&&b[0].style&&!b.nodeType)},J=function(a,b){var c,d={};for(c in a)V[c]||c in b&&"transform"!==c&&"x"!==c&&"y"!==c&&"width"!==c&&"height"!==c&&"className"!==c&&"border"!==c||!(!S[c]||S[c]&&S[c]._autoCSS)||(d[c]=a[c],delete a[c]);a.css=d};i=H.prototype=new E,i.constructor=H,i.kill()._gc=!1,i.ratio=0,i._firstPT=i._targets=i._overwrittenProps=i._startAt=null,i._notifyPluginsOfEnabled=i._lazy=!1,H.version="2.0.1",H.defaultEase=i._ease=new w(null,null,1,1),H.defaultOverwrite="auto",H.ticker=j,H.autoSleep=120,H.lagSmoothing=function(a,b){j.lagSmoothing(a,b)},H.selector=a.$||a.jQuery||function(b){var c=a.$||a.jQuery;return c?(H.selector=c,c(b)):(d||(d=a.document),d?d.querySelectorAll?d.querySelectorAll(b):d.getElementById("#"===b.charAt(0)?b.substr(1):b):b)};var K=[],L={},M=/(?:(-|-=|\+=)?\d*\.?\d*(?:e[\-+]?\d+)?)[0-9]/gi,N=/[\+-]=-?[\.\d]/,O=function(a){for(var b,c=this._firstPT,d=1e-6;c;)b=c.blob?1===a&&null!=this.end?this.end:a?this.join(""):this.start:c.c*a+c.s,c.m?b=c.m.call(this._tween,b,this._target||c.t,this._tween):d>b&&b>-d&&!c.blob&&(b=0),c.f?c.fp?c.t[c.p](c.fp,b):c.t[c.p](b):c.t[c.p]=b,c=c._next},P=function(a,b,c,d){var 
e,f,g,h,i,j,k,l=[],m=0,n="",o=0;for(l.start=a,l.end=b,a=l[0]=a+"",b=l[1]=b+"",c&&(c(l),a=l[0],b=l[1]),l.length=0,e=a.match(M)||[],f=b.match(M)||[],d&&(d._next=null,d.blob=1,l._firstPT=l._applyPT=d),i=f.length,h=0;i>h;h++)k=f[h],j=b.substr(m,b.indexOf(k,m)-m),n+=j||!h?j:",",m+=j.length,o?o=(o+1)%5:"rgba("===j.substr(-5)&&(o=1),k===e[h]||e.length<=h?n+=k:(n&&(l.push(n),n=""),g=parseFloat(e[h]),l.push(g),l._firstPT={_next:l._firstPT,t:l,p:l.length-1,s:g,c:("="===k.charAt(1)?parseInt(k.charAt(0)+"1",10)*parseFloat(k.substr(2)):parseFloat(k)-g)||0,f:0,m:o&&4>o?Math.round:0}),m+=k.length;return n+=b.substr(m),n&&l.push(n),l.setRatio=O,N.test(b)&&(l.end=null),l},Q=function(a,b,c,d,e,f,g,h,i){"function"==typeof d&&(d=d(i||0,a));var j,k=typeof a[b],l="function"!==k?"":b.indexOf("set")||"function"!=typeof a["get"+b.substr(3)]?b:"get"+b.substr(3),m="get"!==c?c:l?g?a[l](g):a[l]():a[b],n="string"==typeof d&&"="===d.charAt(1),o={t:a,p:b,s:m,f:"function"===k,pg:0,n:e||b,m:f?"function"==typeof f?f:Math.round:0,pr:0,c:n?parseInt(d.charAt(0)+"1",10)*parseFloat(d.substr(2)):parseFloat(d)-m||0};return("number"!=typeof m||"number"!=typeof d&&!n)&&(g||isNaN(m)||!n&&isNaN(d)||"boolean"==typeof m||"boolean"==typeof d?(o.fp=g,j=P(m,n?parseFloat(o.s)+o.c+(o.s+"").replace(/[0-9\-\.]/g,""):d,h||H.defaultStringFilter,o),o={t:j,p:"setRatio",s:0,c:1,f:2,pg:0,n:e||b,pr:0,m:0}):(o.s=parseFloat(m),n||(o.c=parseFloat(d)-o.s||0))),o.c?((o._next=this._firstPT)&&(o._next._prev=o),this._firstPT=o,o):void 0},R=H._internals={isArray:q,isSelector:I,lazyTweens:K,blobDif:P},S=H._plugins={},T=R.tweenLookup={},U=0,V=R.reservedProps={ease:1,delay:1,overwrite:1,onComplete:1,onCompleteParams:1,onCompleteScope:1,useFrames:1,runBackwards:1,startAt:1,onUpdate:1,onUpdateParams:1,onUpdateScope:1,onStart:1,onStartParams:1,onStartScope:1,onReverseComplete:1,onReverseCompleteParams:1,onReverseCompleteScope:1,onRepeat:1,onRepeatParams:1,onRepeatScope:1,easeParams:1,yoyo:1,immediateRender:1,repeat:1,repeatDelay:1,data:1,paused:1,reversed:1,autoCSS:1,lazy:1,onOverwrite:1,callbackScope:1,stringFilter:1,id:1,yoyoEase:1},W={none:0,all:1,auto:2,concurrent:3,allOnStart:4,preexisting:5,"true":1,"false":0},X=E._rootFramesTimeline=new G,Y=E._rootTimeline=new G,Z=30,$=R.lazyRender=function(){var a,b=K.length;for(L={};--b>-1;)a=K[b],a&&a._lazy!==!1&&(a.render(a._lazy[0],a._lazy[1],!0),a._lazy=!1);K.length=0};Y._startTime=j.time,X._startTime=j.frame,Y._active=X._active=!0,setTimeout($,1),E._updateRoot=H.render=function(){var a,b,c;if(K.length&&$(),Y.render((j.time-Y._startTime)*Y._timeScale,!1,!1),X.render((j.frame-X._startTime)*X._timeScale,!1,!1),K.length&&$(),j.frame>=Z){Z=j.frame+(parseInt(H.autoSleep,10)||120);for(c in T){for(b=T[c].tweens,a=b.length;--a>-1;)b[a]._gc&&b.splice(a,1);0===b.length&&delete T[c]}if(c=Y._first,(!c||c._paused)&&H.autoSleep&&!X._first&&1===j._listeners.tick.length){for(;c&&c._paused;)c=c._next;c||j.sleep()}}},j.addEventListener("tick",E._updateRoot);var _=function(a,b,c){var d,e,f=a._gsTweenID;if(T[f||(a._gsTweenID=f="t"+U++)]||(T[f]={target:a,tweens:[]}),b&&(d=T[f].tweens,d[e=d.length]=b,c))for(;--e>-1;)d[e]===b&&d.splice(e,1);return T[f].tweens},aa=function(a,b,c,d){var e,f,g=a.vars.onOverwrite;return g&&(e=g(a,b,c,d)),g=H.onOverwrite,g&&(f=g(a,b,c,d)),e!==!1&&f!==!1},ba=function(a,b,c,d,e){var f,g,h,i;if(1===d||d>=4){for(i=e.length,f=0;i>f;f++)if((h=e[f])!==b)h._gc||h._kill(null,a,b)&&(g=!0);else if(5===d)break;return g}var 
j,k=b._startTime+n,l=[],m=0,o=0===b._duration;for(f=e.length;--f>-1;)(h=e[f])===b||h._gc||h._paused||(h._timeline!==b._timeline?(j=j||ca(b,0,o),0===ca(h,j,o)&&(l[m++]=h)):h._startTime<=k&&h._startTime+h.totalDuration()/h._timeScale>k&&((o||!h._initted)&&k-h._startTime<=2e-10||(l[m++]=h)));for(f=m;--f>-1;)if(h=l[f],2===d&&h._kill(c,a,b)&&(g=!0),2!==d||!h._firstPT&&h._initted){if(2!==d&&!aa(h,b))continue;h._enabled(!1,!1)&&(g=!0)}return g},ca=function(a,b,c){for(var d=a._timeline,e=d._timeScale,f=a._startTime;d._timeline;){if(f+=d._startTime,e*=d._timeScale,d._paused)return-100;d=d._timeline}return f/=e,f>b?f-b:c&&f===b||!a._initted&&2*n>f-b?n:(f+=a.totalDuration()/a._timeScale/e)>b+n?0:f-b-n};i._init=function(){var a,b,c,d,e,f,g=this.vars,h=this._overwrittenProps,i=this._duration,j=!!g.immediateRender,k=g.ease;if(g.startAt){this._startAt&&(this._startAt.render(-1,!0),this._startAt.kill()),e={};for(d in g.startAt)e[d]=g.startAt[d];if(e.data="isStart",e.overwrite=!1,e.immediateRender=!0,e.lazy=j&&g.lazy!==!1,e.startAt=e.delay=null,e.onUpdate=g.onUpdate,e.onUpdateParams=g.onUpdateParams,e.onUpdateScope=g.onUpdateScope||g.callbackScope||this,this._startAt=H.to(this.target||{},0,e),j)if(this._time>0)this._startAt=null;else if(0!==i)return}else if(g.runBackwards&&0!==i)if(this._startAt)this._startAt.render(-1,!0),this._startAt.kill(),this._startAt=null;else{0!==this._time&&(j=!1),c={};for(d in g)V[d]&&"autoCSS"!==d||(c[d]=g[d]);if(c.overwrite=0,c.data="isFromStart",c.lazy=j&&g.lazy!==!1,c.immediateRender=j,this._startAt=H.to(this.target,0,c),j){if(0===this._time)return}else this._startAt._init(),this._startAt._enabled(!1),this.vars.immediateRender&&(this._startAt=null)}if(this._ease=k=k?k instanceof w?k:"function"==typeof k?new w(k,g.easeParams):x[k]||H.defaultEase:H.defaultEase,g.easeParams instanceof Array&&k.config&&(this._ease=k.config.apply(k,g.easeParams)),this._easeType=this._ease._type,this._easePower=this._ease._power,this._firstPT=null,this._targets)for(f=this._targets.length,a=0;f>a;a++)this._initProps(this._targets[a],this._propLookup[a]={},this._siblings[a],h?h[a]:null,a)&&(b=!0);else b=this._initProps(this.target,this._propLookup,this._siblings,h,0);if(b&&H._onPluginEvent("_onInitAllProps",this),h&&(this._firstPT||"function"!=typeof this.target&&this._enabled(!1,!1)),g.runBackwards)for(c=this._firstPT;c;)c.s+=c.c,c.c=-c.c,c=c._next;this._onUpdate=g.onUpdate,this._initted=!0},i._initProps=function(b,c,d,e,f){var g,h,i,j,k,l;if(null==b)return!1;L[b._gsTweenID]&&$(),this.vars.css||b.style&&b!==a&&b.nodeType&&S.css&&this.vars.autoCSS!==!1&&J(this.vars,b);for(g in this.vars)if(l=this.vars[g],V[g])l&&(l instanceof Array||l.push&&q(l))&&-1!==l.join("").indexOf("{self}")&&(this.vars[g]=l=this._swapSelfInParams(l,this));else if(S[g]&&(j=new S[g])._onInitTween(b,this.vars[g],this,f)){for(this._firstPT=k={_next:this._firstPT,t:j,p:"setRatio",s:0,c:1,f:1,n:g,pg:1,pr:j._priority,m:0},h=j._overwriteProps.length;--h>-1;)c[j._overwriteProps[h]]=this._firstPT;(j._priority||j._onInitAllProps)&&(i=!0),(j._onDisable||j._onEnable)&&(this._notifyPluginsOfEnabled=!0),k._next&&(k._next._prev=k)}else c[g]=Q.call(this,b,g,"get",l,g,0,null,this.vars.stringFilter,f);return e&&this._kill(e,b)?this._initProps(b,c,d,e,f):this._overwrite>1&&this._firstPT&&d.length>1&&ba(b,this,c,this._overwrite,d)?(this._kill(c,b),this._initProps(b,c,d,e,f)):(this._firstPT&&(this.vars.lazy!==!1&&this._duration||this.vars.lazy&&!this._duration)&&(L[b._gsTweenID]=!0),i)},i.render=function(a,b,c){var 
d,e,f,g,h=this._time,i=this._duration,j=this._rawPrevTime;if(a>=i-1e-7&&a>=0)this._totalTime=this._time=i,this.ratio=this._ease._calcEnd?this._ease.getRatio(1):1,this._reversed||(d=!0,e="onComplete",c=c||this._timeline.autoRemoveChildren),0===i&&(this._initted||!this.vars.lazy||c)&&(this._startTime===this._timeline._duration&&(a=0),(0>j||0>=a&&a>=-1e-7||j===n&&"isPause"!==this.data)&&j!==a&&(c=!0,j>n&&(e="onReverseComplete")),this._rawPrevTime=g=!b||a||j===a?a:n);else if(1e-7>a)this._totalTime=this._time=0,this.ratio=this._ease._calcEnd?this._ease.getRatio(0):0,(0!==h||0===i&&j>0)&&(e="onReverseComplete",d=this._reversed),0>a&&(this._active=!1,0===i&&(this._initted||!this.vars.lazy||c)&&(j>=0&&(j!==n||"isPause"!==this.data)&&(c=!0),this._rawPrevTime=g=!b||a||j===a?a:n)),(!this._initted||this._startAt&&this._startAt.progress())&&(c=!0);else if(this._totalTime=this._time=a,this._easeType){var k=a/i,l=this._easeType,m=this._easePower;(1===l||3===l&&k>=.5)&&(k=1-k),3===l&&(k*=2),1===m?k*=k:2===m?k*=k*k:3===m?k*=k*k*k:4===m&&(k*=k*k*k*k),1===l?this.ratio=1-k:2===l?this.ratio=k:.5>a/i?this.ratio=k/2:this.ratio=1-k/2}else this.ratio=this._ease.getRatio(a/i);if(this._time!==h||c){if(!this._initted){if(this._init(),!this._initted||this._gc)return;if(!c&&this._firstPT&&(this.vars.lazy!==!1&&this._duration||this.vars.lazy&&!this._duration))return this._time=this._totalTime=h,this._rawPrevTime=j,K.push(this),void(this._lazy=[a,b]);this._time&&!d?this.ratio=this._ease.getRatio(this._time/i):d&&this._ease._calcEnd&&(this.ratio=this._ease.getRatio(0===this._time?0:1))}for(this._lazy!==!1&&(this._lazy=!1),this._active||!this._paused&&this._time!==h&&a>=0&&(this._active=!0),0===h&&(this._startAt&&(a>=0?this._startAt.render(a,!0,c):e||(e="_dummyGS")),this.vars.onStart&&(0!==this._time||0===i)&&(b||this._callback("onStart"))),f=this._firstPT;f;)f.f?f.t[f.p](f.c*this.ratio+f.s):f.t[f.p]=f.c*this.ratio+f.s,f=f._next;this._onUpdate&&(0>a&&this._startAt&&a!==-1e-4&&this._startAt.render(a,!0,c),b||(this._time!==h||d||c)&&this._callback("onUpdate")),e&&(!this._gc||c)&&(0>a&&this._startAt&&!this._onUpdate&&a!==-1e-4&&this._startAt.render(a,!0,c),d&&(this._timeline.autoRemoveChildren&&this._enabled(!1,!1),this._active=!1),!b&&this.vars[e]&&this._callback(e),0===i&&this._rawPrevTime===n&&g!==n&&(this._rawPrevTime=0))}},i._kill=function(a,b,c){if("all"===a&&(a=null),null==a&&(null==b||b===this.target))return this._lazy=!1,this._enabled(!1,!1);b="string"!=typeof b?b||this._targets||this.target:H.selector(b)||b;var d,e,f,g,h,i,j,k,l,m=c&&this._time&&c._startTime===this._startTime&&this._timeline===c._timeline;if((q(b)||I(b))&&"number"!=typeof b[0])for(d=b.length;--d>-1;)this._kill(a,b[d],c)&&(i=!0);else{if(this._targets){for(d=this._targets.length;--d>-1;)if(b===this._targets[d]){h=this._propLookup[d]||{},this._overwrittenProps=this._overwrittenProps||[],e=this._overwrittenProps[d]=a?this._overwrittenProps[d]||{}:"all";break}}else{if(b!==this.target)return!1;h=this._propLookup,e=this._overwrittenProps=a?this._overwrittenProps||{}:"all"}if(h){if(j=a||h,k=a!==e&&"all"!==e&&a!==h&&("object"!=typeof a||!a._tempKill),c&&(H.onOverwrite||this.vars.onOverwrite)){for(f in j)h[f]&&(l||(l=[]),l.push(f));if((l||!a)&&!aa(this,c,b,l))return!1}for(f in j)(g=h[f])&&(m&&(g.f?g.t[g.p](g.s):g.t[g.p]=g.s,i=!0),g.pg&&g.t._kill(j)&&(i=!0),g.pg&&0!==g.t._overwriteProps.length||(g._prev?g._prev._next=g._next:g===this._firstPT&&(this._firstPT=g._next),g._next&&(g._next._prev=g._prev),g._next=g._prev=null),delete 
h[f]),k&&(e[f]=1);!this._firstPT&&this._initted&&this._enabled(!1,!1)}}return i},i.invalidate=function(){return this._notifyPluginsOfEnabled&&H._onPluginEvent("_onDisable",this),this._firstPT=this._overwrittenProps=this._startAt=this._onUpdate=null,this._notifyPluginsOfEnabled=this._active=this._lazy=!1,this._propLookup=this._targets?{}:[],E.prototype.invalidate.call(this),this.vars.immediateRender&&(this._time=-n,this.render(Math.min(0,-this._delay))),this},i._enabled=function(a,b){if(k||j.wake(),a&&this._gc){var c,d=this._targets;if(d)for(c=d.length;--c>-1;)this._siblings[c]=_(d[c],this,!0);else this._siblings=_(this.target,this,!0)}return E.prototype._enabled.call(this,a,b),this._notifyPluginsOfEnabled&&this._firstPT?H._onPluginEvent(a?"_onEnable":"_onDisable",this):!1},H.to=function(a,b,c){return new H(a,b,c)},H.from=function(a,b,c){return c.runBackwards=!0,c.immediateRender=0!=c.immediateRender,new H(a,b,c)},H.fromTo=function(a,b,c,d){return d.startAt=c,d.immediateRender=0!=d.immediateRender&&0!=c.immediateRender,new H(a,b,d)},H.delayedCall=function(a,b,c,d,e){return new H(b,0,{delay:a,onComplete:b,onCompleteParams:c,callbackScope:d,onReverseComplete:b,onReverseCompleteParams:c,immediateRender:!1,lazy:!1,useFrames:e,overwrite:0})},H.set=function(a,b){return new H(a,0,b)},H.getTweensOf=function(a,b){if(null==a)return[];a="string"!=typeof a?a:H.selector(a)||a;var c,d,e,f;if((q(a)||I(a))&&"number"!=typeof a[0]){for(c=a.length,d=[];--c>-1;)d=d.concat(H.getTweensOf(a[c],b));for(c=d.length;--c>-1;)for(f=d[c],e=c;--e>-1;)f===d[e]&&d.splice(c,1)}else if(a._gsTweenID)for(d=_(a).concat(),c=d.length;--c>-1;)(d[c]._gc||b&&!d[c].isActive())&&d.splice(c,1);return d||[]},H.killTweensOf=H.killDelayedCallsTo=function(a,b,c){"object"==typeof b&&(c=b,b=!1);for(var d=H.getTweensOf(a,b),e=d.length;--e>-1;)d[e]._kill(c,a)};var da=u("plugins.TweenPlugin",function(a,b){this._overwriteProps=(a||"").split(","),this._propName=this._overwriteProps[0],this._priority=b||0,this._super=da.prototype},!0);if(i=da.prototype,da.version="1.19.0",da.API=2,i._firstPT=null,i._addTween=Q,i.setRatio=O,i._kill=function(a){var b,c=this._overwriteProps,d=this._firstPT;if(null!=a[this._propName])this._overwriteProps=[];else for(b=c.length;--b>-1;)null!=a[c[b]]&&c.splice(b,1);for(;d;)null!=a[d.n]&&(d._next&&(d._next._prev=d._prev),d._prev?(d._prev._next=d._next,d._prev=null):this._firstPT===d&&(this._firstPT=d._next)),d=d._next;return!1},i._mod=i._roundProps=function(a){for(var b,c=this._firstPT;c;)b=a[this._propName]||null!=c.n&&a[c.n.split(this._propName+"_").join("")],b&&"function"==typeof b&&(2===c.f?c.t._applyPT.m=b:c.m=b),c=c._next},H._onPluginEvent=function(a,b){var c,d,e,f,g,h=b._firstPT;if("_onInitAllProps"===a){for(;h;){for(g=h._next,d=e;d&&d.pr>h.pr;)d=d._next;(h._prev=d?d._prev:f)?h._prev._next=h:e=h,(h._next=d)?d._prev=h:f=h,h=g}h=b._firstPT=e}for(;h;)h.pg&&"function"==typeof h.t[a]&&h.t[a]()&&(c=!0),h=h._next;return c},da.activate=function(a){for(var b=a.length;--b>-1;)a[b].API===da.API&&(S[(new a[b])._propName]=a[b]);return!0},t.plugin=function(a){if(!(a&&a.propName&&a.init&&a.API))throw"illegal plugin definition.";var b,c=a.propName,d=a.priority||0,e=a.overwriteProps,f={init:"_onInitTween",set:"setRatio",kill:"_kill",round:"_mod",mod:"_mod",initAll:"_onInitAllProps"},g=u("plugins."+c.charAt(0).toUpperCase()+c.substr(1)+"Plugin",function(){da.call(this,c,d),this._overwriteProps=e||[]},a.global===!0),h=g.prototype=new da(c);h.constructor=g,g.API=a.API;for(b in f)"function"==typeof a[b]&&(h[f[b]]=a[b]);return 
g.version=a.version,da.activate([g]),g},g=a._gsQueue){for(h=0;h<g.length;h++)g[h]();for(i in r)r[i].func||a.console.log("GSAP encountered missing dependency: "+i)}k=!1}("undefined"!=typeof module&&module.exports&&"undefined"!=typeof global?global:this||window,"TweenLite"); | PypiClean |
/Abhishek-1.0.1.tar.gz/Abhishek-1.0.1/TOPSIS/topsis.py | import sys
import csv
import pandas as pd
import os
import copy
para_num=len(sys.argv)
if para_num!=5:
print("Incorrect number of paramters")
exit(0)
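# Expected invocation (file names are illustrative; weights and impacts are
# comma-separated, one entry per criterion column):
#   python topsis.py data.csv "1,1,1,2" "+,+,-,+" result.csv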
inputData=sys.argv[1]
try:
s=open(inputData)
except FileNotFoundError:
raise Exception("File doesn't exist")
data=pd.read_csv(inputData)
x,y=data.shape
if y<3:
raise Exception("File with three or more column is valid only!")
inp_weight=sys.argv[2]
inp_impact=sys.argv[3]
weight=[]
impact=[]
# NOTE: weights are read character by character, so each weight must be a
# single digit and weights must be separated by single commas (e.g. "1,1,2,1").
for i in range(len(inp_weight)):
    if i%2!=0 and inp_weight[i]!=',':
        print("Weights aren't separated by commas")
        exit(0)
    if i%2==0:
        num=int(inp_weight[i])
        weight.append(num)
for i in range(len(inp_impact)):
    if i%2!=0 and inp_impact[i]!=',':
        print("Impacts aren't separated by commas")
        exit(0)
    if i%2==0:
        if inp_impact[i]=='+' or inp_impact[i]=='-':
            impact.append(inp_impact[i])
        else:
            print("Impact is neither '+' nor '-'")
            exit(0)
if y-1!=len(weight):
    print("Number of weights doesn't match the number of columns (2nd to last)")
    exit(0)
if y-1!=len(impact):
    print("Number of impacts doesn't match the number of columns (2nd to last)")
    exit(0)
data_columns=list(data.columns)
data=data.values.tolist()
c_data=copy.deepcopy(data)
#normalized performance value
for i in range(1,y):
sum=0
for j in range(x):
if(isinstance(data[j][i], str)):
print("Data in the input file is not numeric")
exit(0)
else:
sum=sum+data[j][i]**2
sum=sum**0.5
for k in range(x):
data[k][i]=data[k][i]/sum
#weighted normalized decision matrix
for i in range(1,y):
for j in range(x):
data[j][i]=data[j][i]*weight[i-1]
#ideal best value and ideal worst value
i_best=[]
i_worst=[]
#calculating ideal best and worst for every feature/column
for i in range(1,y):
maxi=data[0][i]
mini=data[0][i]
for j in range(x):
if data[j][i]>maxi:
maxi=data[j][i]
if data[j][i]<mini:
mini=data[j][i]
if impact[i-1]=='+':
i_best.append(maxi)
i_worst.append(mini)
else:
i_best.append(mini)
i_worst.append(maxi)
#Euclidean distance from ideal best value and ideal worst value
s_best=[]
s_worst=[]
#Calculating euclidean distance for each feature/column
for i in range(x):
sum1=0
sum2=0
for j in range(1,y):
sum1=sum1+(data[i][j]-i_best[j-1])**2
sum2=sum2+(data[i][j]-i_worst[j-1])**2
sum1=sum1**0.5
sum2=sum2**0.5
s_best.append(sum1)
s_worst.append(sum2)
performance_score=[]
temp_score=[]
#Calculating performance score for each data row
for i in range(x):
score=s_worst[i]/(s_best[i]+s_worst[i])
performance_score.append(score)
temp_score.append(score)
temp_score.sort(reverse=True)
#Calculating the ranking
rank=[]
for i in range(x):
for j in range(x):
if(performance_score[i]==temp_score[j]):
rank.append(j+1)
result=[]
for i in range (x):
l=[]
for j in range(y):
l.append(c_data[i][j])
l.append(performance_score[i])
l.append(rank[i])
result.append(l)
#adding column name to result.csv file
data_columns.append("Topsis Score")
data_columns.append("Rank")
#creating result.csv file
result_csv=open(sys.argv[4],'x')
#giving column names to csv file
fields=data_columns
#creating a csv writer object
csvwriter = csv.writer(result_csv)
#writing the fields
csvwriter.writerow(fields)
#writing the data rows
csvwriter.writerows(result)
#closing log csv file
result_csv.close()
print()
print("Result file containing all the input columns, TOPSIS SCORE and RANK is ready!") | PypiClean |
/ModularTorch-0.1.4.tar.gz/ModularTorch-0.1.4/modular_torch/Utils.py | import torch
import os
from torch.utils.tensorboard import SummaryWriter
import datetime
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def save_model_weights(model: torch.nn.Module,
model_name: str,
save_path: str = None) -> None:
"""Save model to given directory.
args:
model: model to save weights.
model_name: name of the model (convention is {model_name_epochs.pth})
        save_path: directory to save the model in (defaults to "models/"; directories are created as required)
return:
model weights are saved at given path
"""
assert model_name.endswith("pth") or model_name.endswith("pt"), "model path should end with 'pt' or 'pth'"
if save_path is None:
save_path = "models/"
os.makedirs(save_path, exist_ok = True)
model_path = save_path + f"{model_name}"
torch.save(model, model_path)
def create_writer(experiment_name: str,
model_name: str,
include_time: bool = False,
comments: str = None,
path: str = "runs") -> SummaryWriter:
"""_summary_
Args:
experiment_name (str): name of experiment / randomly generated.
model_name (str): name of the model
include_time (bool, optional): include time in run name. Defaults to False.
comments (str, optional): comments to add to run (like configs or hyper params). Defaults to None.
path (str, optional): path to save run to. Defaults to "runs".
Returns:
SummaryWriter: writer object to log metrics to a file.
"""
path = path + "/[d]" + datetime.datetime.now().strftime("%Y-%m-%d")
if include_time:
path = path + "_[t]" + datetime.datetime.now().strftime("%H_%M_%S")
path = f"{path}/{experiment_name}/{model_name}"
if comments:
path = f"{path}/{comments}"
print(f"[INFO] created summary writer, saving to {path}")
return SummaryWriter(log_dir = path)
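# Minimal usage sketch (experiment/model names and the logged value are
# illustrative, not part of this module):
#   writer = create_writer("baseline_experiment", "resnet18", comments="lr_0.001")
#   writer.add_scalar("loss/train", 0.25, global_step=0)
#   writer.close()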
def plot_loss_acc_curves_from_history_dicts(train_history: dict,
test_history: dict) -> None:
"""plots loss and accuracy curves, given history dicts.
Args:
train_history (dict): train history dictionary
test_history (dict): test history dictionary
"""
    range_epochs = list(range(len(train_history["loss"])))
fig = make_subplots(rows = 1, cols = 2, subplot_titles = ["accuracy", "loss"])
# acc plots
fig.add_trace(go.Scatter(x = range_epochs, y = train_history["acc"], name = "train_accuracy", mode = "lines+markers"), row = 1, col = 1)
fig.add_trace(go.Scatter(x = range_epochs, y = test_history["acc"], name = "test_accuracy", mode = "lines+markers"), row = 1, col = 1)
fig.add_trace(go.Scatter(x = range_epochs, y = train_history["loss"], name = "train_loss", mode = "lines+markers"), row = 1, col = 2)
fig.add_trace(go.Scatter(x = range_epochs, y = test_history["loss"], name = "test_loss", mode = "lines+markers"), row = 1, col = 2)
fig.update_layout(title = "train vs test (accuracy and loss)")
fig.show() | PypiClean |
/Flask_Admin-1.6.1-py3-none-any.whl/flask_admin/static/admin/js/bs4_filters.js | var AdminFilters = function(element, filtersElement, filterGroups, activeFilters) {
var $root = $(element);
var $container = $('.filters', $root);
var lastCount = 0;
function getCount(name) {
var idx = name.indexOf('_');
if (idx === -1) {
return 0;
}
return parseInt(name.substr(3, idx - 3), 10);
}
function makeName(name) {
var result = 'flt' + lastCount + '_' + name;
lastCount += 1;
return result;
}
function removeFilter() {
$(this).closest('tr').remove();
if($('.filters tr').length == 0) {
$('button', $root).hide();
$('a[class=btn]', $root).hide();
$('.filters tbody').remove();
} else {
$('button', $root).show();
}
return false;
}
// triggered when the filter operation (equals, not equals, etc) is changed
function changeOperation(subfilters, $el, filter, $select) {
// get the filter_group subfilter based on the index of the selected option
var selectedFilter = subfilters[$select.select2('data').element[0].index];
var $inputContainer = $el.find('td').last();
// recreate and style the input field (turn into date range or select2 if necessary)
var $field = createFilterInput($inputContainer, null, selectedFilter);
styleFilterInput(selectedFilter, $field);
$('button', $root).show();
}
// generate HTML for filter input - allows changing filter input type to one with options or tags
function createFilterInput(inputContainer, filterValue, filter) {
if (filter.type == "select2-tags") {
var $field = $('<input type="hidden" class="filter-val form-control" />').attr('name', makeName(filter.arg));
$field.val(filterValue);
} else if (filter.options) {
var $field = $('<select class="filter-val" />').attr('name', makeName(filter.arg));
$(filter.options).each(function() {
// for active filter inputs with options, add "selected" if there is a matching active filter
if (filterValue && (filterValue == this[0])) {
$field.append($('<option/>')
.val(this[0]).text(this[1]).attr('selected', true));
} else {
$field.append($('<option/>')
.val(this[0]).text(this[1]));
}
});
} else {
var $field = $('<input type="text" class="filter-val form-control" />').attr('name', makeName(filter.arg));
$field.val(filterValue);
}
inputContainer.replaceWith($('<td/>').append($field));
return $field;
}
// add styling to input field, accommodates filters that change the input field's HTML
function styleFilterInput(filter, field) {
if (filter.type) {
if ((filter.type == "datepicker") || (filter.type == "daterangepicker")) {
field.attr('data-date-format', "YYYY-MM-DD");
} else if ((filter.type == "datetimepicker") || (filter.type == "datetimerangepicker")) {
field.attr('data-date-format', "YYYY-MM-DD HH:mm:ss");
} else if ((filter.type == "timepicker") || (filter.type == "timerangepicker")) {
field.attr('data-date-format', "HH:mm:ss");
} else if (filter.type == "select2-tags") {
var options = [];
if (filter.options) {
filter.options.forEach(function(option) {
options.push({id:option[0], text:option[1]});
});
// save tag options as json on data attribute
field.attr('data-tags', JSON.stringify(options));
}
}
faForm.applyStyle(field, filter.type);
} else if (filter.options) {
filter.type = "select2";
faForm.applyStyle(field, filter.type);
}
return field;
}
function addFilter(name, subfilters, selectedIndex, filterValue) {
var $el = $('<tr class="form-horizontal" />').appendTo($container);
// Filter list
$el.append(
$('<td/>').append(
$('<a href="#" class="btn btn-secondary remove-filter" />')
.append($('<span class="close-icon">×</span>'))
.append(' ')
.append(name)
.click(removeFilter)
)
);
// Filter operation <select> (equal, not equal, etc)
var $select = $('<select class="filter-op" />');
// if one of the subfilters are selected, use that subfilter to create the input field
var filterSelection = 0;
$.each(subfilters, function( subfilterIndex, subfilter ) {
if (this.index == selectedIndex) {
$select.append($('<option/>').attr('value', subfilter.arg).attr('selected', true).text(subfilter.operation));
filterSelection = subfilterIndex;
} else {
$select.append($('<option/>').attr('value', subfilter.arg).text(subfilter.operation));
}
});
$el.append(
$('<td/>').append($select)
);
// select2 for filter-op (equal, not equal, etc)
$select.select2({width: 'resolve'}).on("change", function(e) {
changeOperation(subfilters, $el, filter, $select);
});
// get filter option from filter_group, only for new filter creation
var filter = subfilters[filterSelection];
var $inputContainer = $('<td/>').appendTo($el);
var $newFilterField = createFilterInput($inputContainer, filterValue, filter).focus();
var $styledFilterField = styleFilterInput(filter, $newFilterField);
return $styledFilterField;
}
// Add Filter Button, new filter
$('a.filter', filtersElement).click(function() {
var name = ($(this).text().trim !== undefined ? $(this).text().trim() : $(this).text().replace(/^\s+|\s+$/g,''));
addFilter(name, filterGroups[name], false, null);
$('button', $root).show();
});
// on page load - add active filters
$.each(activeFilters, function( activeIndex, activeFilter ) {
var idx = activeFilter[0],
name = activeFilter[1],
filterValue = activeFilter[2];
var $activeField = addFilter(name, filterGroups[name], idx, filterValue);
});
// show "Apply Filter" button when filter input is changed
$('.filter-val', $root).on('input change', function() {
$('button', $root).show();
});
$('.remove-filter', $root).click(removeFilter);
$('.filter-val', $root).not('.select2-container').each(function() {
var count = getCount($(this).attr('name'));
if (count > lastCount)
lastCount = count;
});
lastCount += 1;
};
(function($) {
$('[data-role=tooltip]').tooltip({
html: true,
placement: 'bottom'
});
$(document).on('adminFormReady', function(evt){
if ($('#filter-groups-data').length == 1) {
var filter = new AdminFilters(
'#filter_form', '.field-filters',
JSON.parse($('#filter-groups-data').text()),
JSON.parse($('#active-filters-data').text())
);
}
});
$(document).trigger('adminFormReady'); // trigger event to allow dynamic filter form to function properly
})(jQuery); | PypiClean |
/Django_patch-2.2.19-py3-none-any.whl/django/contrib/gis/geos/prototypes/threadsafe.py | import threading
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.libgeos import (
CONTEXT_PTR, error_h, lgeos, notice_h,
)
class GEOSContextHandle(GEOSBase):
"""Represent a GEOS context handle."""
ptr_type = CONTEXT_PTR
destructor = lgeos.finishGEOS_r
def __init__(self):
# Initializing the context handler for this thread with
# the notice and error handler.
self.ptr = lgeos.initGEOS_r(notice_h, error_h)
# Defining a thread-local object and creating an instance
# to hold a reference to GEOSContextHandle for this thread.
class GEOSContext(threading.local):
handle = None
thread_context = GEOSContext()
class GEOSFunc:
"""
Serve as a wrapper for GEOS C Functions. Use thread-safe function
variants when available.
"""
def __init__(self, func_name):
# GEOS thread-safe function signatures end with '_r' and take an
# additional context handle parameter.
self.cfunc = getattr(lgeos, func_name + '_r')
# Create a reference to thread_context so it's not garbage-collected
# before an attempt to call this object.
self.thread_context = thread_context
def __call__(self, *args):
# Create a context handle if one doesn't exist for this thread.
self.thread_context.handle = self.thread_context.handle or GEOSContextHandle()
# Call the threaded GEOS routine with the pointer of the context handle
# as the first argument.
return self.cfunc(self.thread_context.handle.ptr, *args)
def __str__(self):
return self.cfunc.__name__
# argtypes property
def _get_argtypes(self):
return self.cfunc.argtypes
def _set_argtypes(self, argtypes):
self.cfunc.argtypes = [CONTEXT_PTR, *argtypes]
argtypes = property(_get_argtypes, _set_argtypes)
# restype property
def _get_restype(self):
return self.cfunc.restype
def _set_restype(self, restype):
self.cfunc.restype = restype
restype = property(_get_restype, _set_restype)
# errcheck property
def _get_errcheck(self):
return self.cfunc.errcheck
def _set_errcheck(self, errcheck):
self.cfunc.errcheck = errcheck
errcheck = property(_get_errcheck, _set_errcheck) | PypiClean |
/DJModels-0.0.6-py3-none-any.whl/djmodels/db/migrations/operations/special.py | from djmodels.db import router
from .base import Operation
class SeparateDatabaseAndState(Operation):
"""
Take two lists of operations - ones that will be used for the database,
and ones that will be used for the state change. This allows operations
that don't support state change to have it applied, or have operations
that affect the state or not the database, or so on.
"""
serialization_expand_args = ['database_operations', 'state_operations']
def __init__(self, database_operations=None, state_operations=None):
self.database_operations = database_operations or []
self.state_operations = state_operations or []
def deconstruct(self):
kwargs = {}
if self.database_operations:
kwargs['database_operations'] = self.database_operations
if self.state_operations:
kwargs['state_operations'] = self.state_operations
return (
self.__class__.__qualname__,
[],
kwargs
)
def state_forwards(self, app_label, state):
for state_operation in self.state_operations:
state_operation.state_forwards(app_label, state)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
# We calculate state separately in here since our state functions aren't useful
for database_operation in self.database_operations:
to_state = from_state.clone()
database_operation.state_forwards(app_label, to_state)
database_operation.database_forwards(app_label, schema_editor, from_state, to_state)
from_state = to_state
def database_backwards(self, app_label, schema_editor, from_state, to_state):
# We calculate state separately in here since our state functions aren't useful
to_states = {}
for dbop in self.database_operations:
to_states[dbop] = to_state
to_state = to_state.clone()
dbop.state_forwards(app_label, to_state)
# to_state now has the states of all the database_operations applied
# which is the from_state for the backwards migration of the last
# operation.
for database_operation in reversed(self.database_operations):
from_state = to_state
to_state = to_states[database_operation]
database_operation.database_backwards(app_label, schema_editor, from_state, to_state)
def describe(self):
return "Custom state/database change combination"
class RunSQL(Operation):
"""
Run some raw SQL. A reverse SQL statement may be provided.
Also accept a list of operations that represent the state change effected
by this SQL change, in case it's custom column/table creation/deletion.
"""
noop = ''
def __init__(self, sql, reverse_sql=None, state_operations=None, hints=None, elidable=False):
self.sql = sql
self.reverse_sql = reverse_sql
self.state_operations = state_operations or []
self.hints = hints or {}
self.elidable = elidable
def deconstruct(self):
kwargs = {
'sql': self.sql,
}
if self.reverse_sql is not None:
kwargs['reverse_sql'] = self.reverse_sql
if self.state_operations:
kwargs['state_operations'] = self.state_operations
if self.hints:
kwargs['hints'] = self.hints
return (
self.__class__.__qualname__,
[],
kwargs
)
@property
def reversible(self):
return self.reverse_sql is not None
def state_forwards(self, app_label, state):
for state_operation in self.state_operations:
state_operation.state_forwards(app_label, state)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
self._run_sql(schema_editor, self.sql)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
if self.reverse_sql is None:
raise NotImplementedError("You cannot reverse this operation")
if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
self._run_sql(schema_editor, self.reverse_sql)
def describe(self):
return "Raw SQL operation"
def _run_sql(self, schema_editor, sqls):
if isinstance(sqls, (list, tuple)):
for sql in sqls:
params = None
if isinstance(sql, (list, tuple)):
elements = len(sql)
if elements == 2:
sql, params = sql
else:
raise ValueError("Expected a 2-tuple but got %d" % elements)
schema_editor.execute(sql, params=params)
elif sqls != RunSQL.noop:
statements = schema_editor.connection.ops.prepare_sql_script(sqls)
for statement in statements:
schema_editor.execute(statement, params=None)
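# Usage sketch inside a migration (SQL statements are illustrative):
#   RunSQL(
#       sql="CREATE INDEX app_book_title_idx ON app_book (title);",
#       reverse_sql="DROP INDEX app_book_title_idx;",
#   )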
class RunPython(Operation):
"""
Run Python code in a context suitable for doing versioned ORM operations.
"""
reduces_to_sql = False
def __init__(self, code, reverse_code=None, atomic=None, hints=None, elidable=False):
self.atomic = atomic
# Forwards code
if not callable(code):
raise ValueError("RunPython must be supplied with a callable")
self.code = code
# Reverse code
if reverse_code is None:
self.reverse_code = None
else:
if not callable(reverse_code):
raise ValueError("RunPython must be supplied with callable arguments")
self.reverse_code = reverse_code
self.hints = hints or {}
self.elidable = elidable
def deconstruct(self):
kwargs = {
'code': self.code,
}
if self.reverse_code is not None:
kwargs['reverse_code'] = self.reverse_code
if self.atomic is not None:
kwargs['atomic'] = self.atomic
if self.hints:
kwargs['hints'] = self.hints
return (
self.__class__.__qualname__,
[],
kwargs
)
@property
def reversible(self):
return self.reverse_code is not None
def state_forwards(self, app_label, state):
# RunPython objects have no state effect. To add some, combine this
# with SeparateDatabaseAndState.
pass
def database_forwards(self, app_label, schema_editor, from_state, to_state):
# RunPython has access to all models. Ensure that all models are
# reloaded in case any are delayed.
from_state.clear_delayed_apps_cache()
if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
# We now execute the Python code in a context that contains a 'models'
# object, representing the versioned models as an app registry.
# We could try to override the global cache, but then people will still
# use direct imports, so we go with a documentation approach instead.
self.code(from_state.apps, schema_editor)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
if self.reverse_code is None:
raise NotImplementedError("You cannot reverse this operation")
if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
self.reverse_code(from_state.apps, schema_editor)
def describe(self):
return "Raw Python operation"
@staticmethod
def noop(apps, schema_editor):
return None | PypiClean |
/Django_patch-2.2.19-py3-none-any.whl/django/contrib/auth/views.py | from urllib.parse import urlparse, urlunparse
from django.conf import settings
# Avoid shadowing the login() and logout() views below.
from django.contrib.auth import (
REDIRECT_FIELD_NAME, get_user_model, login as auth_login,
logout as auth_logout, update_session_auth_hash,
)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm,
)
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ValidationError
from django.http import HttpResponseRedirect, QueryDict
from django.shortcuts import resolve_url
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.http import is_safe_url, urlsafe_base64_decode
from django.utils.translation import gettext_lazy as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
UserModel = get_user_model()
class SuccessURLAllowedHostsMixin:
success_url_allowed_hosts = set()
def get_success_url_allowed_hosts(self):
return {self.request.get_host(), *self.success_url_allowed_hosts}
class LoginView(SuccessURLAllowedHostsMixin, FormView):
"""
Display the login form and handle the login action.
"""
form_class = AuthenticationForm
authentication_form = None
redirect_field_name = REDIRECT_FIELD_NAME
template_name = 'registration/login.html'
redirect_authenticated_user = False
extra_context = None
@method_decorator(sensitive_post_parameters())
@method_decorator(csrf_protect)
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
if self.redirect_authenticated_user and self.request.user.is_authenticated:
redirect_to = self.get_success_url()
if redirect_to == self.request.path:
raise ValueError(
"Redirection loop for authenticated user detected. Check that "
"your LOGIN_REDIRECT_URL doesn't point to a login page."
)
return HttpResponseRedirect(redirect_to)
return super().dispatch(request, *args, **kwargs)
def get_success_url(self):
url = self.get_redirect_url()
return url or resolve_url(settings.LOGIN_REDIRECT_URL)
def get_redirect_url(self):
"""Return the user-originating redirect URL if it's safe."""
redirect_to = self.request.POST.get(
self.redirect_field_name,
self.request.GET.get(self.redirect_field_name, '')
)
url_is_safe = is_safe_url(
url=redirect_to,
allowed_hosts=self.get_success_url_allowed_hosts(),
require_https=self.request.is_secure(),
)
return redirect_to if url_is_safe else ''
def get_form_class(self):
return self.authentication_form or self.form_class
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def form_valid(self, form):
"""Security check complete. Log the user in."""
auth_login(self.request, form.get_user())
return HttpResponseRedirect(self.get_success_url())
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
current_site = get_current_site(self.request)
context.update({
self.redirect_field_name: self.get_redirect_url(),
'site': current_site,
'site_name': current_site.name,
**(self.extra_context or {})
})
return context
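# Typical URLconf usage (URL and template path are illustrative):
#   path('accounts/login/', LoginView.as_view(template_name='registration/login.html'),
#        name='login')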
class LogoutView(SuccessURLAllowedHostsMixin, TemplateView):
"""
Log out the user and display the 'You are logged out' message.
"""
next_page = None
redirect_field_name = REDIRECT_FIELD_NAME
template_name = 'registration/logged_out.html'
extra_context = None
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
auth_logout(request)
next_page = self.get_next_page()
if next_page:
# Redirect to this page until the session has been cleared.
return HttpResponseRedirect(next_page)
return super().dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
"""Logout may be done via POST."""
return self.get(request, *args, **kwargs)
def get_next_page(self):
if self.next_page is not None:
next_page = resolve_url(self.next_page)
elif settings.LOGOUT_REDIRECT_URL:
next_page = resolve_url(settings.LOGOUT_REDIRECT_URL)
else:
next_page = self.next_page
if (self.redirect_field_name in self.request.POST or
self.redirect_field_name in self.request.GET):
next_page = self.request.POST.get(
self.redirect_field_name,
self.request.GET.get(self.redirect_field_name)
)
url_is_safe = is_safe_url(
url=next_page,
allowed_hosts=self.get_success_url_allowed_hosts(),
require_https=self.request.is_secure(),
)
# Security check -- Ensure the user-originating redirection URL is
# safe.
if not url_is_safe:
next_page = self.request.path
return next_page
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
current_site = get_current_site(self.request)
context.update({
'site': current_site,
'site_name': current_site.name,
'title': _('Logged out'),
**(self.extra_context or {})
})
return context
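# Typical URLconf usage (URL and next_page are illustrative):
#   path('accounts/logout/', LogoutView.as_view(next_page='/'), name='logout')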
def logout_then_login(request, login_url=None):
"""
Log out the user if they are logged in. Then redirect to the login page.
"""
login_url = resolve_url(login_url or settings.LOGIN_URL)
return LogoutView.as_view(next_page=login_url)(request)
def redirect_to_login(next, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Redirect the user to the login page, passing the given 'next' page.
"""
resolved_url = resolve_url(login_url or settings.LOGIN_URL)
login_url_parts = list(urlparse(resolved_url))
if redirect_field_name:
querystring = QueryDict(login_url_parts[4], mutable=True)
querystring[redirect_field_name] = next
login_url_parts[4] = querystring.urlencode(safe='/')
return HttpResponseRedirect(urlunparse(login_url_parts))
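# Usage sketch inside a view guarding an authenticated-only page (URL is illustrative):
#   if not request.user.is_authenticated:
#       return redirect_to_login(request.get_full_path(), login_url='/accounts/login/')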
# Class-based password reset views
# - PasswordResetView sends the mail
# - PasswordResetDoneView shows a success message for the above
# - PasswordResetConfirmView checks the link the user clicked and
# prompts for a new password
# - PasswordResetCompleteView shows a success message for the above
class PasswordContextMixin:
extra_context = None
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'title': self.title,
**(self.extra_context or {})
})
return context
class PasswordResetView(PasswordContextMixin, FormView):
email_template_name = 'registration/password_reset_email.html'
extra_email_context = None
form_class = PasswordResetForm
from_email = None
html_email_template_name = None
subject_template_name = 'registration/password_reset_subject.txt'
success_url = reverse_lazy('password_reset_done')
template_name = 'registration/password_reset_form.html'
title = _('Password reset')
token_generator = default_token_generator
@method_decorator(csrf_protect)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def form_valid(self, form):
opts = {
'use_https': self.request.is_secure(),
'token_generator': self.token_generator,
'from_email': self.from_email,
'email_template_name': self.email_template_name,
'subject_template_name': self.subject_template_name,
'request': self.request,
'html_email_template_name': self.html_email_template_name,
'extra_email_context': self.extra_email_context,
}
form.save(**opts)
return super().form_valid(form)
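# Typical URLconf usage (URL pattern and name are illustrative):
#   path('password_reset/', PasswordResetView.as_view(), name='password_reset')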
INTERNAL_RESET_URL_TOKEN = 'set-password'
INTERNAL_RESET_SESSION_TOKEN = '_password_reset_token'
class PasswordResetDoneView(PasswordContextMixin, TemplateView):
template_name = 'registration/password_reset_done.html'
title = _('Password reset sent')
class PasswordResetConfirmView(PasswordContextMixin, FormView):
form_class = SetPasswordForm
post_reset_login = False
post_reset_login_backend = None
success_url = reverse_lazy('password_reset_complete')
template_name = 'registration/password_reset_confirm.html'
title = _('Enter new password')
token_generator = default_token_generator
@method_decorator(sensitive_post_parameters())
@method_decorator(never_cache)
def dispatch(self, *args, **kwargs):
assert 'uidb64' in kwargs and 'token' in kwargs
self.validlink = False
self.user = self.get_user(kwargs['uidb64'])
if self.user is not None:
token = kwargs['token']
if token == INTERNAL_RESET_URL_TOKEN:
session_token = self.request.session.get(INTERNAL_RESET_SESSION_TOKEN)
if self.token_generator.check_token(self.user, session_token):
# If the token is valid, display the password reset form.
self.validlink = True
return super().dispatch(*args, **kwargs)
else:
if self.token_generator.check_token(self.user, token):
# Store the token in the session and redirect to the
# password reset form at a URL without the token. That
# avoids the possibility of leaking the token in the
# HTTP Referer header.
self.request.session[INTERNAL_RESET_SESSION_TOKEN] = token
redirect_url = self.request.path.replace(token, INTERNAL_RESET_URL_TOKEN)
return HttpResponseRedirect(redirect_url)
# Display the "Password reset unsuccessful" page.
return self.render_to_response(self.get_context_data())
def get_user(self, uidb64):
try:
# urlsafe_base64_decode() decodes to bytestring
uid = urlsafe_base64_decode(uidb64).decode()
user = UserModel._default_manager.get(pk=uid)
except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist, ValidationError):
user = None
return user
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.user
return kwargs
def form_valid(self, form):
user = form.save()
del self.request.session[INTERNAL_RESET_SESSION_TOKEN]
if self.post_reset_login:
auth_login(self.request, user, self.post_reset_login_backend)
return super().form_valid(form)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.validlink:
context['validlink'] = True
else:
context.update({
'form': None,
'title': _('Password reset unsuccessful'),
'validlink': False,
})
return context
class PasswordResetCompleteView(PasswordContextMixin, TemplateView):
template_name = 'registration/password_reset_complete.html'
title = _('Password reset complete')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['login_url'] = resolve_url(settings.LOGIN_URL)
return context
class PasswordChangeView(PasswordContextMixin, FormView):
form_class = PasswordChangeForm
success_url = reverse_lazy('password_change_done')
template_name = 'registration/password_change_form.html'
title = _('Password change')
@method_decorator(sensitive_post_parameters())
@method_decorator(csrf_protect)
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def form_valid(self, form):
form.save()
# Updating the password logs out all other sessions for the user
# except the current one.
update_session_auth_hash(self.request, form.user)
return super().form_valid(form)
class PasswordChangeDoneView(PasswordContextMixin, TemplateView):
template_name = 'registration/password_change_done.html'
title = _('Password change successful')
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs) | PypiClean |
/GenIce2-2.1.7.1.tar.gz/GenIce2-2.1.7.1/genice2/lattices/iceR.py | from genice2.cell import cellvectors
import genice2.lattices
desc = {"ref": {"Methane A": 'Maynard-Casely 2010',
"R": 'Mochizuki 2014'},
"usage": "No options available.",
"brief": "Hypothetical ice R.",
"test": ({"args": "",
"options": "--depol=optimal"},)
}
class Lattice(genice2.lattices.Lattice):
def __init__(self):
self.cell = """
7.547382417065826 0 0
0.08957203488361681 7.54685087967168 0
0.08957203488361681 0.08851523136724358 7.546331774698035
"""
self.waters = """
0.7029999999999993 0.2040000000000006 0.08500000000000085
0.08500000000000087 0.7029999999999994 0.20400000000000063
0.2040000000000006 0.08500000000000087 0.7029999999999993
0.41999999999999993 0.9380000000000006 0.07199999999999918
0.07199999999999916 0.41999999999999993 0.9380000000000005
0.9380000000000005 0.07199999999999918 0.4199999999999999
0.4350000000000005 0.5190000000000001 0.18900000000000003
0.18900000000000006 0.4350000000000005 0.5190000000000001
0.519 0.18900000000000006 0.4350000000000005
0.9529999999999994 0.7200000000000006 0.6259999999999994
0.6259999999999994 0.9529999999999994 0.7200000000000005
0.7200000000000005 0.6259999999999993 0.9529999999999994
0.8390000000000004 0.44500000000000023 0.3179999999999996
0.3179999999999996 0.8390000000000003 0.4450000000000003
0.44500000000000023 0.31799999999999967 0.8390000000000003
0.7010000000000005 0.8330000000000001 0.29100000000000037
0.29100000000000037 0.7010000000000004 0.8330000000000002
0.8330000000000002 0.29100000000000037 0.7010000000000004
0.18599999999999994 0.18599999999999994 0.18599999999999994
0.9529999999999994 0.9529999999999994 0.9529999999999994
0.5749999999999993 0.5749999999999992 0.5749999999999992
"""
self.coord = "relative"
self.bondlen = 3.05
self.density = 1.5
self.cell = cellvectors(a=7.547382417065826,
b=7.547382417065826,
c=7.547382417065826,
A=89.31999999999998,
B=89.31999999999998,
C=89.31999999999998) | PypiClean |
/OGER-1.5.tar.gz/OGER-1.5/oger/er/entity_recognition.py |
# Nico Colic, September 2015
# Modified by Lenz Furrer, 2015--2016
'''
Entity Recognition core.
'''
import re
import csv
import pickle
import os.path
import logging
from ..ctrl import parameters
from ..nlp.tokenize import Text_processing
from ..util import misc, stream
from . import term_normalization as normalization
DEFAULT_TOKEN = (
# A token is a sequence of either numerical or alphabetical characters.
r'\d+|[^\W\d_]+',
# For abbreviation detection, a single parenthesis also forms a token.
r'\d+|[^\W\d_]+|[()]'
)
class EntityRecognizer(object):
"""
Dictionary-based entity recognition.
"""
def __init__(self, config=parameters.ERParams(), **kwargs):
"""
Loads the terms from file or pickle.
`term_token` is a regular expression pattern defining
a token for constructing a term tokenizer.
It does not have to be the same tokenizer that
is used to tokenize the text in the article, since
the entity recognizer does not rely on that
tokenization.
`cache` is the default folder in which
we check for cached pickle files. A cached file has
the same basename as the term list, plus ".pickle".
If `force_reload` is set, it will load from file in
any case. Use this when the term list has changed.
When loading from file, the term list will be pickled
automatically for faster (up to 20 times)
loading in subsequent calls.
`stopwords` is either an iterable of stopwords
or a path to a list of stopwords (one per line).
"""
self.tokenizer = Text_processing(self._tokenizer_spec(config), None)
self._normalizers = normalization.load(config.normalize)
self.stopwords = self.import_stopwords(config.stopwords)
self.term_first, self.full_terms = self.load_termlist(config, **kwargs)
@classmethod
def ensure_cache(cls, *args, **kwargs):
'''
Make sure there is a pickled version of the termlist.
'''
# Simply create a throw-away instance with the hidden (undocumented)
# kwarg `skip_loading`, which makes the constructor look for the
# pickle file, but doesn't load it.
kwargs['skip_loading'] = True
cls(*args, **kwargs)
@staticmethod
def _tokenizer_spec(config):
if config.term_tokenizer:
return config.term_tokenizer
else:
token = config.term_token or DEFAULT_TOKEN[config.abbrev_detection]
return 'RegexpTokenizer({})'.format(repr(token))
def import_stopwords(self, stopwords):
'''
Resolve the different ways the stopwords are provided.
'''
if isinstance(stopwords, str):
# stopwords is a path:
with open(stopwords) as f:
stopwords = [l.strip() for l in f]
# Any False-equivalent value is interpreted as no stopwords.
stopwords = stopwords or []
# The stopwords are saved and looked up in normalized form.
return frozenset(self.normalize(self.tokenizer.tokenize_words(w))
for w in stopwords)
def load_termlist(self, config, skip_loading=False):
'''
Check for a pickle, or else create one.
After reading the term list into a dictionary,
it has the following internal structure:
key: first token of the term
value: tuple(
[0] = whole term,
[1] = term_type (or category),
[2] = term_preferred_form,
[3] = resource of origin
[4] = native ID (in the respective database),
[5] = UMLS CUI
)
If additional fields were defined through `extra_fields`,
then the value tuple is extended correspondingly.
'''
# Check if pickle with the same file name exists.
if config.path is None:
raise ValueError('no termlist specified')
if config.cache is None:
config.cache = os.path.dirname(config.path)
basename = os.path.basename(config.path)
pickle_file = os.path.join(config.cache, basename + '.pickle')
n_fields = 5 + config.n_extra # 5 std fields besides the term
if os.path.exists(pickle_file) and not config.force_reload:
if skip_loading:
# Optimisation feature:
# Only check for a pickle, but don't load it.
terms = None, None
else:
terms = self.load_termlist_from_pickle(pickle_file, n_fields)
# Load the termlist from file.
else:
try:
parser = getattr(
self, 'termlist_format_{}'.format(config.field_format))
except AttributeError:
logging.error('No such termlist format: %s',
config.field_format)
raise ValueError('Invalid termlist format')
terms = self.load_termlist_from_file(config, parser, n_fields)
try:
self.write_terms_to_pickle(terms, pickle_file)
except OSError as e:
logging.warning('Cannot write termlist pickle: %s (%r)',
pickle_file, e)
return terms
@staticmethod
def load_termlist_from_pickle(pickle_path, n_exp):
'''
Perform a shallow format check before loading.
'''
logging.info('Unpickling terms from %s', pickle_path)
with open(pickle_path, 'rb') as f:
terms = pickle.load(f)
try:
# Make sure we have the right format.
term_first, full_terms = terms
except ValueError:
logging.exception(
'Termlist pickle in obsolete format: %s\n '
'Delete the pickle file or run with force_reload=True.',
pickle_path)
raise
try:
n_found = len(next(iter(full_terms.values()))[0])
except StopIteration:
logging.warning('unpickling empty termlist')
else:
if n_found != n_exp:
logging.error(
'Termlist pickle with wrong number of fields: '
'expected %d, found %d\n '
'Pickle file: %s\n '
'Delete the pickle file or run with force_reload=True.',
n_exp, n_found, pickle_path)
raise ValueError('Termlist pickle with unexpected field count')
logging.info('Terms loaded from pickle.')
return term_first, full_terms
@staticmethod
def write_terms_to_pickle(terms, filename):
'''
Dump everything to disk.
'''
if filename.startswith(stream.REMOTE_PROTOCOLS):
raise OSError('Cannot write pickle to remote location')
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'wb') as f:
pickle.dump(terms, f)
logging.info('Terms written to pickle at %s', filename)
def load_termlist_from_file(self, config, field_parser, n_fields):
"""
Index the term DB.
The terms are indexed by the first token of the term
expression.
These keys point to a list of entries.
"""
logging.info("Loading terms from file %s", config.path)
term_first, full_terms = {}, {}
entry = ('',) * n_fields
with stream.ropen(config.path, encoding='utf-8', newline='') as tsv:
reader = csv.reader(tsv, escapechar='\\', **misc.tsv_format)
if config.skip_header:
next(reader)
for line_no, line in enumerate(reader, 1+config.skip_header):
term, std, extra = field_parser(line)
# Apply text processing to the surface term.
toks = tuple(self.tokenizer.tokenize_words(term))
norm = self.normalize(toks)
term = self.em_filter(norm, toks, None, None)
try:
term_first[norm[0]].add(len(term))
except KeyError:
term_first[norm[0]] = set([len(term)])
                except IndexError:
                    logging.warning(
                        "Skipping line %d: empty term field", line_no)
                    continue
entry = self._cached_entry(entry, std + extra)
if len(entry) != n_fields:
logging.error(
'Line %d: Wrong field count: %d (expected %d)',
line_no, len(entry)+1, n_fields+1)
raise ValueError('Unexpected number of TSV fields')
try:
full_terms[term].add(entry)
except KeyError:
full_terms[term] = set([entry])
# For memory reasons, replace the sets with tuples.
for k, v in term_first.items():
# Sort the length indicators, so that we can stop early
# when reaching the end of a sentence.
term_first[k] = tuple(sorted(v))
for k, v in full_terms.items():
full_terms[k] = tuple(v)
logging.info("Finished loading termlist.")
return term_first, full_terms
@staticmethod
def termlist_format_4(fields):
'''
Legacy format with 4 columns, native ID first.
[0] ID, [1] term, [2] type, [3] preferred form
'''
term = fields[1]
std = (fields[2], fields[3], 'unknown', fields[0], 'none')
extra = tuple(fields[4:])
return term, std, extra
@staticmethod
def termlist_format_6(fields):
'''
Like the legacy format, but including original DB and UMLS CUI.
[0] native ID, [1] term, [2] type, [3] preferred form,
[4] resource from which it comes, [5] UMLS CUI
'''
term = fields[1]
std = (fields[2], fields[3], fields[4], fields[0], fields[5])
extra = tuple(fields[6:])
return term, std, extra
@staticmethod
def termlist_format_bth(fields):
'''
Format produced by the Bio Term Hub (UMLS CUI first).
[0] UMLS CUI, [1] resource from which it comes,
[2] native ID, [3] term, [4] preferred form, [5] type
'''
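        # Made-up example row (tab-separated values):
        #   CUI0000 <TAB> SomeDB <TAB> 42 <TAB> IL-2 <TAB> interleukin 2 <TAB> gene/protein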
term = fields[3]
std = (fields[5], fields[4], fields[1], fields[2], fields[0])
extra = tuple(fields[6:])
return term, std, extra
@staticmethod
def _cached_entry(previous, new):
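        # Reuse any field values that are identical to the previous entry's,
        # so that repeated strings across consecutive rows share the same
        # objects (reduces memory usage for large term lists).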
return tuple(p if p == n else n for p, n in zip(previous, new))
def _normalize(self, token):
'''
Call all normalizer functions in a cascade.
'''
for n in self._normalizers:
token = n(token)
return token
def normalize(self, tokens):
'''
Normalize a sequence of tokens.
'''
return tuple(self._normalize(t) for t in tokens)
def em_filter(self, norm, exact, start, stop):
'''
Enforce exact match for stopwords.
'''
norm = norm[start:stop]
if norm in self.stopwords:
return exact[start:stop]
return norm
def recognize_entities(self, sentence):
"""
Go through all words and try to match them to the terms.
A sentence is an un-tokenized string.
Iterates over the found entities, yielding named tuples:
[0] position: a pair of offsets (start, end)
[1] type
[2] preferred_form
[3] resource (from which it comes)
[4] native_id
[5] umls_cui
* [3] and [5] are only useful if the termlist_format is 6 or bth.
If additional fields were defined in the constructor,
the tuples are extended appropriately.
"""
span_toks = zip(*self.tokenizer.span_tokenize_words(sentence))
try:
toks, starts, ends = span_toks
except ValueError:
# No tokens in this sentence: exit early.
return
normalized = self.normalize(toks)
for i, word in enumerate(normalized):
# There might be multiple entries for the first token in terms:
for ntoks in self.term_first.get(word, ()):
j = i+ntoks
if j > len(normalized):
# Not enough tokens remaining: Exit the inner loop early.
break
candidate = self.em_filter(normalized, toks, i, j)
if candidate in self.full_terms:
position = (starts[i], ends[j-1])
matches = self.full_terms[candidate]
for entry in matches:
yield position, entry
self._match_hook(matches,
sentence, toks, normalized,
position, i, j)
# Some placeholder methods used in subclasses.
@staticmethod
def _match_hook(*_):
'Do something with an entity match in context.'
@staticmethod
def reset():
'Reset to initial state.'
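# Minimal usage sketch (illustrative only; the constructor arguments are
# defined earlier in this module and a configured termlist is assumed):
#
#     er = EntityRecognizer(...)
#     for (start, end), entry in er.recognize_entities('Some sentence.'):
#         print(start, end, entry)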
class AbbrevDetector(EntityRecognizer):
'''
Entity recognizer capable of learning new abbreviations.
'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.abbrevs = {}
self.stopwords = set(self.stopwords) # make this mutable again
def _match_hook(self, *args):
'''
Check for a subsequent abbreviation definition.
'''
matches, _, toks, normalized, _, _, j = args
if toks[j:j+3:2] == ('(', ')'):
self.register_abbrev((toks[j+1],), (normalized[j+1],), matches)
def register_abbrev(self, toks, norm, entries):
'''
Add an abbrev to the hash tables and keep track of the changes.
'''
mod_stopword, mod_first, mod_full = None, None, None
# Enforce an exact match for abbreviations.
if norm not in self.stopwords:
mod_stopword = norm
self.stopwords.add(norm)
# Update the hash tables. There are 3 cases:
# (1) unchanged, (2) new entry, (3) extend existing entry.
# First-token hash:
try:
backup = self.term_first[norm[0]]
except KeyError:
# Case 2.
mod_first = 'pop'
self.term_first[norm[0]] = (len(toks),)
else:
if len(toks) not in backup:
# Case 3.
mod_first = backup
self.term_first[norm[0]] = tuple(sorted((len(toks),) + backup))
# Full-term hash:
try:
backup = self.full_terms[toks]
except KeyError:
# Case 2.
mod_full = 'pop'
self.full_terms[toks] = entries
else:
union = set(backup).union(entries)
if len(union) > len(backup):
# Case 3.
mod_full = backup
self.full_terms[toks] = tuple(union)
# Register the changes.
self.update_registry(toks, mod_stopword, mod_first, mod_full)
def update_registry(self, toks, stpw, first, full):
'''
Merge the new change signature with any previous.
'''
if toks in self.abbrevs:
p_stpw, p_first, p_full = self.abbrevs[toks]
stpw = p_stpw or stpw
first = p_first or first
full = p_full or full
self.abbrevs[toks] = (stpw, first, full)
def clear_abbrev_cache(self):
'Reset the hash tables for a new document.'
for toks, (mod_stopword, mod_first, mod_full) in self.abbrevs.items():
# Undo all the modifications from .register_abbrev().
if mod_stopword:
self.stopwords.remove(mod_stopword)
if mod_first == 'pop':
self.term_first.pop(toks[0], None)
elif mod_first:
self.term_first[toks[0]] = mod_first
if mod_full == 'pop':
self.full_terms.pop(toks, None)
elif mod_full:
self.full_terms[toks] = mod_full
self.abbrevs.clear()
def reset(self):
'Clear the abbreviation cache.'
self.clear_abbrev_cache()
class RegexAbbrevDetector(AbbrevDetector):
'''
    Regex-based, tokenisation-independent abbreviation detector.
'''
def __init__(self, *args, abbrevpattern=r'\s+\((\w+)\)', **kwargs):
super().__init__(*args, **kwargs)
self.abbrevpattern = re.compile(abbrevpattern)
def _match_hook(self, *args):
matches, sentence, _, _, position, _, _ = args
m = self.abbrevpattern.match(sentence[position[1]:])
if m:
toks = tuple(self.tokenizer.tokenize_words(m.group(1)))
norm = self.normalize(toks)
self.register_abbrev(toks, norm, matches)
def recognize_entities(self, sentence):
for entity in super().recognize_entities(sentence):
yield entity | PypiClean |
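# Illustrative behaviour sketch (assuming a configured AbbrevDetector):
# after a sentence such as "... epidermal growth factor receptor (EGFR) ..."
# has produced a term match, the short form "EGFR" is registered and matched
# in subsequent sentences; call reset() (or clear_abbrev_cache()) between
# documents to discard the learned abbreviations.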
/CleanAdminDjango-1.5.3.1.tar.gz/CleanAdminDjango-1.5.3.1/django/db/backends/creation.py | import hashlib
import sys
import time
from django.conf import settings
from django.db.utils import load_backend
from django.utils.encoding import force_bytes
from django.utils.six.moves import input
# The prefix to put on the default database name when creating
# the test database.
TEST_DATABASE_PREFIX = 'test_'
class BaseDatabaseCreation(object):
"""
This class encapsulates all backend-specific differences that pertain to
database *creation*, such as the column types to use for particular Django
Fields, the SQL used to create and destroy tables, and the creation and
destruction of test databases.
"""
data_types = {}
def __init__(self, connection):
self.connection = connection
def _digest(self, *args):
"""
Generates a 32-bit digest of a set of arguments that can be used to
shorten identifying names.
"""
h = hashlib.md5()
for arg in args:
h.update(force_bytes(arg))
return h.hexdigest()[:8]
def sql_create_model(self, model, style, known_models=set()):
"""
Returns the SQL required to create a single model, as a tuple of:
(list_of_sql, pending_references_dict)
"""
opts = model._meta
if not opts.managed or opts.proxy or opts.swapped:
return [], {}
final_output = []
table_output = []
pending_references = {}
qn = self.connection.ops.quote_name
for f in opts.local_fields:
col_type = f.db_type(connection=self.connection)
tablespace = f.db_tablespace or opts.db_tablespace
if col_type is None:
# Skip ManyToManyFields, because they're not represented as
# database columns in this table.
continue
# Make the definition (e.g. 'foo VARCHAR(30)') for this field.
field_output = [style.SQL_FIELD(qn(f.column)),
style.SQL_COLTYPE(col_type)]
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
null = f.null
if (f.empty_strings_allowed and not f.primary_key and
self.connection.features.interprets_empty_strings_as_nulls):
null = True
if not null:
field_output.append(style.SQL_KEYWORD('NOT NULL'))
if f.primary_key:
field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
elif f.unique:
field_output.append(style.SQL_KEYWORD('UNIQUE'))
if tablespace and f.unique:
# We must specify the index tablespace inline, because we
# won't be generating a CREATE INDEX statement for this field.
tablespace_sql = self.connection.ops.tablespace_sql(
tablespace, inline=True)
if tablespace_sql:
field_output.append(tablespace_sql)
if f.rel:
ref_output, pending = self.sql_for_inline_foreign_key_references(
f, known_models, style)
if pending:
pending_references.setdefault(f.rel.to, []).append(
(model, f))
else:
field_output.extend(ref_output)
table_output.append(' '.join(field_output))
for field_constraints in opts.unique_together:
table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' %
", ".join(
[style.SQL_FIELD(qn(opts.get_field(f).column))
for f in field_constraints]))
full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' +
style.SQL_TABLE(qn(opts.db_table)) + ' (']
for i, line in enumerate(table_output): # Combine and add commas.
full_statement.append(
' %s%s' % (line, i < len(table_output) - 1 and ',' or ''))
full_statement.append(')')
if opts.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(
opts.db_tablespace)
if tablespace_sql:
full_statement.append(tablespace_sql)
full_statement.append(';')
final_output.append('\n'.join(full_statement))
if opts.has_auto_field:
# Add any extra SQL needed to support auto-incrementing primary
# keys.
auto_column = opts.auto_field.db_column or opts.auto_field.name
autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table,
auto_column)
if autoinc_sql:
for stmt in autoinc_sql:
final_output.append(stmt)
return final_output, pending_references
def sql_for_inline_foreign_key_references(self, field, known_models, style):
"""
Return the SQL snippet defining the foreign key reference for a field.
"""
qn = self.connection.ops.quote_name
if field.rel.to in known_models:
output = [style.SQL_KEYWORD('REFERENCES') + ' ' +
style.SQL_TABLE(qn(field.rel.to._meta.db_table)) + ' (' +
style.SQL_FIELD(qn(field.rel.to._meta.get_field(
field.rel.field_name).column)) + ')' +
self.connection.ops.deferrable_sql()
]
pending = False
else:
# We haven't yet created the table to which this field
# is related, so save it for later.
output = []
pending = True
return output, pending
def sql_for_pending_references(self, model, style, pending_references):
"""
Returns any ALTER TABLE statements to add constraints after the fact.
"""
from django.db.backends.util import truncate_name
opts = model._meta
if not opts.managed or opts.proxy or opts.swapped:
return []
qn = self.connection.ops.quote_name
final_output = []
if model in pending_references:
for rel_class, f in pending_references[model]:
rel_opts = rel_class._meta
r_table = rel_opts.db_table
r_col = f.column
table = opts.db_table
col = opts.get_field(f.rel.field_name).column
# For MySQL, r_name must be unique in the first 64 characters.
# So we are careful with character usage here.
r_name = '%s_refs_%s_%s' % (
r_col, col, self._digest(r_table, table))
final_output.append(style.SQL_KEYWORD('ALTER TABLE') +
' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' %
(qn(r_table), qn(truncate_name(
r_name, self.connection.ops.max_name_length())),
qn(r_col), qn(table), qn(col),
self.connection.ops.deferrable_sql()))
del pending_references[model]
return final_output
def sql_indexes_for_model(self, model, style):
"""
Returns the CREATE INDEX SQL statements for a single model.
"""
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return []
output = []
for f in model._meta.local_fields:
output.extend(self.sql_indexes_for_field(model, f, style))
for fs in model._meta.index_together:
fields = [model._meta.get_field_by_name(f)[0] for f in fs]
output.extend(self.sql_indexes_for_fields(model, fields, style))
return output
def sql_indexes_for_field(self, model, f, style):
"""
Return the CREATE INDEX SQL statements for a single model field.
"""
if f.db_index and not f.unique:
return self.sql_indexes_for_fields(model, [f], style)
else:
return []
def sql_indexes_for_fields(self, model, fields, style):
from django.db.backends.util import truncate_name
if len(fields) == 1 and fields[0].db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
elif model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
else:
tablespace_sql = ""
if tablespace_sql:
tablespace_sql = " " + tablespace_sql
field_names = []
qn = self.connection.ops.quote_name
for f in fields:
field_names.append(style.SQL_FIELD(qn(f.column)))
index_name = "%s_%s" % (model._meta.db_table, self._digest([f.name for f in fields]))
return [
style.SQL_KEYWORD("CREATE INDEX") + " " +
style.SQL_TABLE(qn(truncate_name(index_name, self.connection.ops.max_name_length()))) + " " +
style.SQL_KEYWORD("ON") + " " +
style.SQL_TABLE(qn(model._meta.db_table)) + " " +
"(%s)" % style.SQL_FIELD(", ".join(field_names)) +
"%s;" % tablespace_sql,
]
def sql_destroy_model(self, model, references_to_delete, style):
"""
Return the DROP TABLE and restraint dropping statements for a single
model.
"""
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return []
# Drop the table now
qn = self.connection.ops.quote_name
output = ['%s %s;' % (style.SQL_KEYWORD('DROP TABLE'),
style.SQL_TABLE(qn(model._meta.db_table)))]
if model in references_to_delete:
output.extend(self.sql_remove_table_constraints(
model, references_to_delete, style))
if model._meta.has_auto_field:
ds = self.connection.ops.drop_sequence_sql(model._meta.db_table)
if ds:
output.append(ds)
return output
def sql_remove_table_constraints(self, model, references_to_delete, style):
from django.db.backends.util import truncate_name
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return []
output = []
qn = self.connection.ops.quote_name
for rel_class, f in references_to_delete[model]:
table = rel_class._meta.db_table
col = f.column
r_table = model._meta.db_table
r_col = model._meta.get_field(f.rel.field_name).column
r_name = '%s_refs_%s_%s' % (
col, r_col, self._digest(table, r_table))
output.append('%s %s %s %s;' % \
(style.SQL_KEYWORD('ALTER TABLE'),
style.SQL_TABLE(qn(table)),
style.SQL_KEYWORD(self.connection.ops.drop_foreignkey_sql()),
style.SQL_FIELD(qn(truncate_name(
r_name, self.connection.ops.max_name_length())))))
del references_to_delete[model]
return output
def create_test_db(self, verbosity=1, autoclobber=False):
"""
Creates a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
"""
# Don't import django.core.management if it isn't needed.
from django.core.management import call_command
test_database_name = self._get_test_db_name()
if verbosity >= 1:
test_db_repr = ''
if verbosity >= 2:
test_db_repr = " ('%s')" % test_database_name
print("Creating test database for alias '%s'%s..." % (
self.connection.alias, test_db_repr))
self._create_test_db(verbosity, autoclobber)
self.connection.close()
self.connection.settings_dict["NAME"] = test_database_name
# Report syncdb messages at one level lower than that requested.
# This ensures we don't get flooded with messages during testing
# (unless you really ask to be flooded)
call_command('syncdb',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias,
load_initial_data=False)
# We need to then do a flush to ensure that any data installed by
# custom SQL has been removed. The only test data should come from
# test fixtures, or autogenerated from post_syncdb triggers.
# This has the side effect of loading initial data (which was
# intentionally skipped in the syncdb).
call_command('flush',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias)
from django.core.cache import get_cache
from django.core.cache.backends.db import BaseDatabaseCache
for cache_alias in settings.CACHES:
cache = get_cache(cache_alias)
if isinstance(cache, BaseDatabaseCache):
call_command('createcachetable', cache._table,
database=self.connection.alias)
# Get a cursor (even though we don't need one yet). This has
# the side effect of initializing the test database.
self.connection.cursor()
return test_database_name
def _get_test_db_name(self):
"""
Internal implementation - returns the name of the test DB that will be
created. Only useful when called from create_test_db() and
_create_test_db() and when no external munging is done with the 'NAME'
or 'TEST_NAME' settings.
"""
if self.connection.settings_dict['TEST_NAME']:
return self.connection.settings_dict['TEST_NAME']
return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
def _create_test_db(self, verbosity, autoclobber):
"""
Internal implementation - creates the test db tables.
"""
suffix = self.sql_table_creation_suffix()
test_database_name = self._get_test_db_name()
qn = self.connection.ops.quote_name
# Create the test database and connect to it. We need to autocommit
# if the database supports it because PostgreSQL doesn't allow
# CREATE/DROP DATABASE statements within transactions.
cursor = self.connection.cursor()
self._prepare_for_test_db_ddl()
try:
cursor.execute(
"CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
except Exception as e:
sys.stderr.write(
"Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = input(
"Type 'yes' if you would like to try deleting the test "
"database '%s', or 'no' to cancel: " % test_database_name)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test database '%s'..."
% self.connection.alias)
cursor.execute(
"DROP DATABASE %s" % qn(test_database_name))
cursor.execute(
"CREATE DATABASE %s %s" % (qn(test_database_name),
suffix))
except Exception as e:
sys.stderr.write(
"Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
return test_database_name
def destroy_test_db(self, old_database_name, verbosity=1):
"""
Destroy a test database, prompting the user for confirmation if the
database already exists.
"""
self.connection.close()
test_database_name = self.connection.settings_dict['NAME']
if verbosity >= 1:
test_db_repr = ''
if verbosity >= 2:
test_db_repr = " ('%s')" % test_database_name
print("Destroying test database for alias '%s'%s..." % (
self.connection.alias, test_db_repr))
# Temporarily use a new connection and a copy of the settings dict.
# This prevents the production database from being exposed to potential
# child threads while (or after) the test database is destroyed.
# Refs #10868 and #17786.
settings_dict = self.connection.settings_dict.copy()
settings_dict['NAME'] = old_database_name
backend = load_backend(settings_dict['ENGINE'])
new_connection = backend.DatabaseWrapper(
settings_dict,
alias='__destroy_test_db__',
allow_thread_sharing=False)
new_connection.creation._destroy_test_db(test_database_name, verbosity)
def _destroy_test_db(self, test_database_name, verbosity):
"""
Internal implementation - remove the test db tables.
"""
# Remove the test database to clean up after
# ourselves. Connect to the previous database (not the test database)
# to do so, because it's not allowed to delete a database while being
# connected to it.
cursor = self.connection.cursor()
self._prepare_for_test_db_ddl()
# Wait to avoid "database is being accessed by other users" errors.
time.sleep(1)
cursor.execute("DROP DATABASE %s"
% self.connection.ops.quote_name(test_database_name))
self.connection.close()
def set_autocommit(self):
"""
Make sure a connection is in autocommit mode. - Deprecated, not used
anymore by Django code. Kept for compatibility with user code that
might use it.
"""
pass
def _prepare_for_test_db_ddl(self):
"""
Internal implementation - Hook for tasks that should be performed
before the ``CREATE DATABASE``/``DROP DATABASE`` clauses used by
        testing code to create/destroy test databases. Needed e.g. in
PostgreSQL to rollback and close any active transaction.
"""
pass
def sql_table_creation_suffix(self):
"""
SQL to append to the end of the test table creation statements.
"""
return ''
def test_db_signature(self):
"""
Returns a tuple with elements of self.connection.settings_dict (a
DATABASES setting value) that uniquely identify a database
        according to the RDBMS particularities.
"""
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME']
) | PypiClean |
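# Typical invocation (illustrative; normally performed by Django's test
# runner rather than called directly):
#
#     from django.db import connection
#     old_name = connection.settings_dict['NAME']
#     connection.creation.create_test_db(verbosity=1)
#     ...  # run the test suite
#     connection.creation.destroy_test_db(old_name, verbosity=1)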
/IsPycharmRun-1.0.tar.gz/IsPycharmRun-1.0/pb_py/sc_msg_dailyevent_pb2.py |
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import msg_base_pb2 as msg__base__pb2
import msg_common_pb2 as msg__common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='sc_msg_dailyevent.proto',
package='FunPlus.Common.Config',
syntax='proto2',
serialized_options=b'H\001Z\022server/pkg/gen/msg',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x17sc_msg_dailyevent.proto\x12\x15\x46unPlus.Common.Config\x1a\x0emsg_base.proto\x1a\x10msg_common.proto\"6\n\x17PBAllDailyEventsRequest\x12\x13\n\x0bis_open_cmp\x18\x01 \x01(\x08:\x06\x80}\x81\xd8\x80\x05\"\x90\x01\n\x18PBAllDailyEventsResponse\x12\x41\n\x10\x64\x61ily_event_list\x18\x01 \x03(\x0b\x32\'.FunPlus.Common.Config.PBDailyEventData\x12\x12\n\nweek_index\x18\x02 \x01(\r\x12\x15\n\rnew_event_ids\x18\x03 \x03(\t:\x06\x80}\x82\xd8\x80\x05\"O\n\x1cPBDailyEventPlotBeginRequest\x12\x0f\n\x07plot_id\x18\x01 \x01(\x05\x12\x16\n\x0e\x64\x61ily_event_id\x18\x02 \x01(\t:\x06\x80}\x83\xd8\x80\x05\"\\\n\x1dPBDailyEventPlotBeginResponse\x12\x0f\n\x07plot_id\x18\x01 \x01(\x05\x12\x16\n\x0e\x64\x61ily_event_id\x18\x02 \x01(\t\x12\n\n\x02ok\x18\x03 \x01(\x08:\x06\x80}\x84\xd8\x80\x05\"M\n\x1aPBDailyEventPlotEndRequest\x12\x0f\n\x07plot_id\x18\x01 \x01(\x05\x12\x16\n\x0e\x64\x61ily_event_id\x18\x02 \x01(\t:\x06\x80}\x85\xd8\x80\x05\"N\n\x1bPBDailyEventPlotEndResponse\x12\x0f\n\x07plot_id\x18\x01 \x01(\x05\x12\x16\n\x0e\x64\x61ily_event_id\x18\x02 \x01(\t:\x06\x80}\x86\xd8\x80\x05\x42\x16H\x01Z\x12server/pkg/gen/msg'
,
dependencies=[msg__base__pb2.DESCRIPTOR,msg__common__pb2.DESCRIPTOR,])
_PBALLDAILYEVENTSREQUEST = _descriptor.Descriptor(
name='PBAllDailyEventsRequest',
full_name='FunPlus.Common.Config.PBAllDailyEventsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='is_open_cmp', full_name='FunPlus.Common.Config.PBAllDailyEventsRequest.is_open_cmp', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\200}\201\330\200\005',
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=84,
serialized_end=138,
)
_PBALLDAILYEVENTSRESPONSE = _descriptor.Descriptor(
name='PBAllDailyEventsResponse',
full_name='FunPlus.Common.Config.PBAllDailyEventsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='daily_event_list', full_name='FunPlus.Common.Config.PBAllDailyEventsResponse.daily_event_list', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='week_index', full_name='FunPlus.Common.Config.PBAllDailyEventsResponse.week_index', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='new_event_ids', full_name='FunPlus.Common.Config.PBAllDailyEventsResponse.new_event_ids', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\200}\202\330\200\005',
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=141,
serialized_end=285,
)
_PBDAILYEVENTPLOTBEGINREQUEST = _descriptor.Descriptor(
name='PBDailyEventPlotBeginRequest',
full_name='FunPlus.Common.Config.PBDailyEventPlotBeginRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='plot_id', full_name='FunPlus.Common.Config.PBDailyEventPlotBeginRequest.plot_id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='daily_event_id', full_name='FunPlus.Common.Config.PBDailyEventPlotBeginRequest.daily_event_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\200}\203\330\200\005',
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=287,
serialized_end=366,
)
_PBDAILYEVENTPLOTBEGINRESPONSE = _descriptor.Descriptor(
name='PBDailyEventPlotBeginResponse',
full_name='FunPlus.Common.Config.PBDailyEventPlotBeginResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='plot_id', full_name='FunPlus.Common.Config.PBDailyEventPlotBeginResponse.plot_id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='daily_event_id', full_name='FunPlus.Common.Config.PBDailyEventPlotBeginResponse.daily_event_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ok', full_name='FunPlus.Common.Config.PBDailyEventPlotBeginResponse.ok', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\200}\204\330\200\005',
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=368,
serialized_end=460,
)
_PBDAILYEVENTPLOTENDREQUEST = _descriptor.Descriptor(
name='PBDailyEventPlotEndRequest',
full_name='FunPlus.Common.Config.PBDailyEventPlotEndRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='plot_id', full_name='FunPlus.Common.Config.PBDailyEventPlotEndRequest.plot_id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='daily_event_id', full_name='FunPlus.Common.Config.PBDailyEventPlotEndRequest.daily_event_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\200}\205\330\200\005',
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=462,
serialized_end=539,
)
_PBDAILYEVENTPLOTENDRESPONSE = _descriptor.Descriptor(
name='PBDailyEventPlotEndResponse',
full_name='FunPlus.Common.Config.PBDailyEventPlotEndResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='plot_id', full_name='FunPlus.Common.Config.PBDailyEventPlotEndResponse.plot_id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='daily_event_id', full_name='FunPlus.Common.Config.PBDailyEventPlotEndResponse.daily_event_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\200}\206\330\200\005',
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=541,
serialized_end=619,
)
_PBALLDAILYEVENTSRESPONSE.fields_by_name['daily_event_list'].message_type = msg__common__pb2._PBDAILYEVENTDATA
DESCRIPTOR.message_types_by_name['PBAllDailyEventsRequest'] = _PBALLDAILYEVENTSREQUEST
DESCRIPTOR.message_types_by_name['PBAllDailyEventsResponse'] = _PBALLDAILYEVENTSRESPONSE
DESCRIPTOR.message_types_by_name['PBDailyEventPlotBeginRequest'] = _PBDAILYEVENTPLOTBEGINREQUEST
DESCRIPTOR.message_types_by_name['PBDailyEventPlotBeginResponse'] = _PBDAILYEVENTPLOTBEGINRESPONSE
DESCRIPTOR.message_types_by_name['PBDailyEventPlotEndRequest'] = _PBDAILYEVENTPLOTENDREQUEST
DESCRIPTOR.message_types_by_name['PBDailyEventPlotEndResponse'] = _PBDAILYEVENTPLOTENDRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PBAllDailyEventsRequest = _reflection.GeneratedProtocolMessageType('PBAllDailyEventsRequest', (_message.Message,), {
'DESCRIPTOR' : _PBALLDAILYEVENTSREQUEST,
'__module__' : 'sc_msg_dailyevent_pb2'
# @@protoc_insertion_point(class_scope:FunPlus.Common.Config.PBAllDailyEventsRequest)
})
_sym_db.RegisterMessage(PBAllDailyEventsRequest)
PBAllDailyEventsResponse = _reflection.GeneratedProtocolMessageType('PBAllDailyEventsResponse', (_message.Message,), {
'DESCRIPTOR' : _PBALLDAILYEVENTSRESPONSE,
'__module__' : 'sc_msg_dailyevent_pb2'
# @@protoc_insertion_point(class_scope:FunPlus.Common.Config.PBAllDailyEventsResponse)
})
_sym_db.RegisterMessage(PBAllDailyEventsResponse)
PBDailyEventPlotBeginRequest = _reflection.GeneratedProtocolMessageType('PBDailyEventPlotBeginRequest', (_message.Message,), {
'DESCRIPTOR' : _PBDAILYEVENTPLOTBEGINREQUEST,
'__module__' : 'sc_msg_dailyevent_pb2'
# @@protoc_insertion_point(class_scope:FunPlus.Common.Config.PBDailyEventPlotBeginRequest)
})
_sym_db.RegisterMessage(PBDailyEventPlotBeginRequest)
PBDailyEventPlotBeginResponse = _reflection.GeneratedProtocolMessageType('PBDailyEventPlotBeginResponse', (_message.Message,), {
'DESCRIPTOR' : _PBDAILYEVENTPLOTBEGINRESPONSE,
'__module__' : 'sc_msg_dailyevent_pb2'
# @@protoc_insertion_point(class_scope:FunPlus.Common.Config.PBDailyEventPlotBeginResponse)
})
_sym_db.RegisterMessage(PBDailyEventPlotBeginResponse)
PBDailyEventPlotEndRequest = _reflection.GeneratedProtocolMessageType('PBDailyEventPlotEndRequest', (_message.Message,), {
'DESCRIPTOR' : _PBDAILYEVENTPLOTENDREQUEST,
'__module__' : 'sc_msg_dailyevent_pb2'
# @@protoc_insertion_point(class_scope:FunPlus.Common.Config.PBDailyEventPlotEndRequest)
})
_sym_db.RegisterMessage(PBDailyEventPlotEndRequest)
PBDailyEventPlotEndResponse = _reflection.GeneratedProtocolMessageType('PBDailyEventPlotEndResponse', (_message.Message,), {
'DESCRIPTOR' : _PBDAILYEVENTPLOTENDRESPONSE,
'__module__' : 'sc_msg_dailyevent_pb2'
# @@protoc_insertion_point(class_scope:FunPlus.Common.Config.PBDailyEventPlotEndResponse)
})
_sym_db.RegisterMessage(PBDailyEventPlotEndResponse)
DESCRIPTOR._options = None
_PBALLDAILYEVENTSREQUEST._options = None
_PBALLDAILYEVENTSRESPONSE._options = None
_PBDAILYEVENTPLOTBEGINREQUEST._options = None
_PBDAILYEVENTPLOTBEGINRESPONSE._options = None
_PBDAILYEVENTPLOTENDREQUEST._options = None
_PBDAILYEVENTPLOTENDRESPONSE._options = None
# @@protoc_insertion_point(module_scope) | PypiClean |
/Appium-Flutter-Finder-0.4.0.tar.gz/Appium-Flutter-Finder-0.4.0/appium_flutter_finder/flutter_finder.py | import base64
import json
from appium.webdriver.webelement import WebElement
class FlutterElement(WebElement):
pass
class FlutterFinder:
def by_ancestor(self, serialized_finder, matching, match_root=False, first_match_only=False):
return self._by_ancestor_or_descendant(
type_='Ancestor',
serialized_finder=serialized_finder,
matching=matching,
match_root=match_root,
first_match_only=first_match_only
)
def by_descendant(self, serialized_finder, matching, match_root=False, first_match_only=False):
return self._by_ancestor_or_descendant(
type_='Descendant',
serialized_finder=serialized_finder,
matching=matching,
match_root=match_root,
first_match_only=first_match_only
)
def by_semantics_label(self, label, isRegExp=False):
return self._serialize(dict(
finderType='BySemanticsLabel',
isRegExp=isRegExp,
label=label
))
def by_tooltip(self, text):
return self._serialize(dict(
finderType='ByTooltipMessage',
text=text
))
def by_text(self, text):
return self._serialize(dict(
finderType='ByText',
text=text
))
def by_type(self, type_):
return self._serialize(dict(
finderType='ByType',
type=type_
))
def by_value_key(self, key):
return self._serialize(dict(
finderType='ByValueKey',
keyValueString=key,
keyValueType='String' if isinstance(key, str) else 'int'
))
def page_back(self):
return self._serialize(dict(
finderType='PageBack'
))
def _serialize(self, finder_dict):
return base64.b64encode(
bytes(json.dumps(finder_dict, separators=(',', ':')), 'UTF-8')).decode('UTF-8')
def _by_ancestor_or_descendant(self, type_, serialized_finder, matching, match_root=False, first_match_only=False):
param = dict(finderType=type_, matchRoot=match_root, firstMatchOnly=first_match_only)
try:
finder = json.loads(base64.b64decode(
serialized_finder).decode('utf-8'))
except Exception:
finder = {}
param.setdefault('of', {})
for finder_key, finder_value in finder.items():
param['of'].setdefault(finder_key, finder_value)
param['of'] = json.dumps(param['of'], separators=(',', ':'))
try:
matching = json.loads(base64.b64decode(matching).decode('utf-8'))
except Exception:
matching = {}
param.setdefault('matching', {})
for matching_key, matching_value in matching.items():
param['matching'].setdefault(matching_key, matching_value)
param['matching'] = json.dumps(param['matching'], separators=(',', ':'))
return self._serialize(param) | PypiClean |
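# Minimal usage sketch (illustrative; `driver` is assumed to be an Appium
# session started against a Flutter app with automationName='Flutter'):
#
#     finder = FlutterFinder()
#     element = FlutterElement(driver, finder.by_value_key('counter'))
#     element.click()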
/LimeReport-qt-6-4-1.7.5.tar.gz/LimeReport-qt-6-4-1.7.5/LimeReport/translations/limereport_zh.ts | <?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.1" language="zh_CN">
<context>
<name>$ClassName$</name>
<message>
<source>$ClassName$</source>
<translation>$ClassName$</translation>
</message>
</context>
<context>
<name>ChartAxisEditor</name>
<message>
<source>Axis editor</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Axis</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Reverse direction</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Enable scale calculation</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Step</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Maximum</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Minimum</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Automatic</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Cancel</source>
<translation type="unfinished">取消</translation>
</message>
<message>
<source>Ok</source>
<translation type="unfinished">确定</translation>
</message>
</context>
<context>
<name>ChartItemEditor</name>
<message>
<source>Series editor</source>
<translation>数据系列编辑器</translation>
</message>
<message>
<source>Series</source>
<translation>数据系列</translation>
</message>
<message>
<source>Add</source>
<translation>增加</translation>
</message>
<message>
<source>Delete</source>
<translation>删除</translation>
</message>
<message>
<source>Name</source>
<translation>名称</translation>
</message>
<message>
<source>Values field</source>
<translation>取值字段</translation>
</message>
<message>
<source>Color</source>
<translation>颜色</translation>
</message>
<message>
<source>Type</source>
<translation>类型</translation>
</message>
<message>
<source>Labels field</source>
<translation>标签字段</translation>
</message>
<message>
<source>Ok</source>
<translation>确定</translation>
</message>
<message>
<source>Series name</source>
<translation>系列名称</translation>
</message>
<message>
<source>X data field</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>ImageItemEditor</name>
<message>
<source>Image Item Editor</source>
<translation>图像组件编辑</translation>
</message>
<message>
<source>Image</source>
<translation>图像</translation>
</message>
<message>
<source>...</source>
<translation>...</translation>
</message>
<message>
<source>Resource path</source>
<translation>资源路径</translation>
</message>
<message>
<source>Select image file</source>
<translation>选择图像文件</translation>
</message>
</context>
<context>
<name>LRVariableDialog</name>
<message>
<source>Variable</source>
<translation>变量</translation>
</message>
<message>
<source>Name</source>
<translation>名称</translation>
</message>
<message>
<source>Value</source>
<translation>值</translation>
</message>
<message>
<source>Type</source>
<translation>类型</translation>
</message>
<message>
<source>Attention</source>
<translation>注意</translation>
</message>
<message>
<source>Mandatory</source>
<translation>必要</translation>
</message>
</context>
<context>
<name>LanguageSelectDialog</name>
<message>
<source>Dialog</source>
<translation>对话框</translation>
</message>
<message>
<source>Language</source>
<translation>语言</translation>
</message>
</context>
<context>
<name>LimeReport::AboutDialog</name>
<message>
<source>About</source>
<translation>关于</translation>
</message>
<message>
<source>Lime Report</source>
<translation></translation>
</message>
<message>
<source>Author</source>
<translation>作者</translation>
</message>
<message>
<source><!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Arin Alexander</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">[email protected]</p></body></html></source>
<translation></translation>
</message>
<message>
<source>License</source>
<translation>许可</translation>
</message>
<message>
<source><!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">(c) 2015 Arin Alexander [email protected]</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br /></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><a name="SEC1"></a><span style=" font-family:'sans-serif'; font-weight:600;">G</span><span style=" font-family:'sans-serif'; font-weight:600;">NU LESSER GENERAL PUBLIC LICENSE</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">Version 2.1, February 1999</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">Copyright (C) 1991, 1999 Free Software Foundation, Inc.</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">Everyone is permitted to copy and distribute verbatim copies</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">of this license document, but changing it is not allowed.</span></p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:'monospace';"><br /></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">[This is the first released version of the Lesser GPL. It also counts</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';"> as the successor of the GNU Library Public License, version 2, hence</span></p>
<p style=" margin-top:0px; margin-bottom:15px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';"> the version number 2.1.]</span></p>
<p style=" margin-top:15px; margin-bottom:15px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><a name="SEC2"></a><span style=" font-family:'sans-serif'; font-weight:600;">P</span><span style=" font-family:'sans-serif'; font-weight:600;">reamble</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">We call this license the &quot;Lesser&quot; General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a &quot;work based on the library&quot; and a &quot;work that uses the library&quot;. The former contains code derived from the library, whereas the latter must be combined with the library in order to run.</span></p>
<p style=" margin-top:15px; margin-bottom:15px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><a name="SEC3"></a><span style=" font-family:'sans-serif'; font-weight:600; color:#333333;">T</span><span style=" font-family:'sans-serif'; font-weight:600; color:#333333;">ERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif'; font-weight:600;">0.</span><span style=" font-family:'sans-serif';"> This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called &quot;this License&quot;). Each licensee is addressed as &quot;you&quot;.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">A &quot;library&quot; means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">The &quot;Library&quot;, below, refers to any such software library or work which has been distributed under these terms. A &quot;work based on the Library&quot; means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term &quot;modification&quot;.)</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">&quot;Source code&quot; for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif'; font-weight:600;">1.</span><span style=" font-family:'sans-serif';"> You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif'; font-weight:600;">2.</span><span style=" font-family:'sans-serif';"> You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions:</span></p>
<ul style="margin-top: 0px; margin-bottom: 0px; margin-left: 0px; margin-right: 0px; -qt-list-indent: 1;"><li style=" font-family:'sans-serif';" style=" margin-top:19px; margin-bottom:0px; margin-left:38px; margin-right:19px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:16px; font-weight:600;">a)</span><span style=" font-size:16px;"> The modified work must itself be a software library.</span></li>
<li style=" font-family:'sans-serif';" style=" margin-top:0px; margin-bottom:0px; margin-left:38px; margin-right:19px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:16px; font-weight:600;">b)</span><span style=" font-size:16px;"> You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change.</span></li>
<li style=" font-family:'sans-serif';" style=" margin-top:0px; margin-bottom:0px; margin-left:38px; margin-right:19px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:16px; font-weight:600;">c)</span><span style=" font-size:16px;"> You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License.</span></li>
<li style=" font-family:'sans-serif';" style=" margin-top:0px; margin-bottom:19px; margin-left:38px; margin-right:19px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:16px; font-weight:600;">d)</span><span style=" font-size:16px;"> If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful.</span></li></ul>
<p style=" margin-top:15px; margin-bottom:15px; margin-left:38px; margin-right:19px; -qt-block-indent:1; text-indent:0px;"><span style=" font-family:'sans-serif';">(For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.)</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif'; font-weight:600;">3.</span><span style=" font-family:'sans-serif';"> You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">This option is useful when you wish to copy part of the code of the Library into a program that is not a library.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif'; font-weight:600;">4.</span><span style=" font-family:'sans-serif';"> You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif'; font-weight:600;">5.</span><span style=" font-family:'sans-serif';"> A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a &quot;work that uses the Library&quot;. Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">However, linking a &quot;work that uses the Library&quot; with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a &quot;work that uses the library&quot;. The executable is therefore covered by this License. Section 6 states terms for distribution of such executables.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">When a &quot;work that uses the Library&quot; uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.)</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif'; font-weight:600;">6.</span><span style=" font-family:'sans-serif';"> As an exception to the Sections above, you may also combine or link a &quot;work that uses the Library&quot; with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things:</span></p>
<ul style="margin-top: 0px; margin-bottom: 0px; margin-left: 0px; margin-right: 0px; -qt-list-indent: 1;"><li style=" font-family:'sans-serif';" style=" margin-top:19px; margin-bottom:0px; margin-left:38px; margin-right:19px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:16px; font-weight:600;">a)</span><span style=" font-size:16px;"> Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable &quot;work that uses the Library&quot;, as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.)</span></li>
<li style=" font-family:'sans-serif';" style=" margin-top:0px; margin-bottom:0px; margin-left:38px; margin-right:19px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:16px; font-weight:600;">b)</span><span style=" font-size:16px;"> Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with.</span></li>
<li style=" font-family:'sans-serif';" style=" margin-top:0px; margin-bottom:0px; margin-left:38px; margin-right:19px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:16px; font-weight:600;">c)</span><span style=" font-size:16px;"> Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution.</span></li>
<li style=" font-family:'sans-serif';" style=" margin-top:0px; margin-bottom:0px; margin-left:38px; margin-right:19px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:16px; font-weight:600;">d)</span><span style=" font-size:16px;"> If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place.</span></li>
<li style=" font-family:'sans-serif';" style=" margin-top:0px; margin-bottom:19px; margin-left:38px; margin-right:19px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:16px; font-weight:600;">e)</span><span style=" font-size:16px;"> Verify that the user has already received a copy of these materials or that you have already sent this user a copy.</span></li></ul>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">For an executable, the required form of the &quot;work that uses the Library&quot; must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif'; font-weight:600;">7.</span><span style=" font-family:'sans-serif';"> You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things:</span></p>
<ul style="margin-top: 0px; margin-bottom: 0px; margin-left: 0px; margin-right: 0px; -qt-list-indent: 1;"><li style=" font-family:'sans-serif';" style=" margin-top:19px; margin-bottom:0px; margin-left:38px; margin-right:19px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:16px; font-weight:600;">a)</span><span style=" font-size:16px;"> Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above.</span></li>
<li style=" font-family:'sans-serif';" style=" margin-top:0px; margin-bottom:19px; margin-left:38px; margin-right:19px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:16px; font-weight:600;">b)</span><span style=" font-size:16px;"> Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work.</span></li></ul>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif'; font-weight:600;">8.</span><span style=" font-family:'sans-serif';"> You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif'; font-weight:600;">9.</span><span style=" font-family:'sans-serif';"> You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif'; font-weight:600;">10.</span><span style=" font-family:'sans-serif';"> Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif'; font-weight:600;">11.</span><span style=" font-family:'sans-serif';"> If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif'; font-weight:600;">12.</span><span style=" font-family:'sans-serif';"> If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif'; font-weight:600;">13.</span><span style=" font-family:'sans-serif';"> The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and &quot;any later version&quot;, you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif'; font-weight:600;">14.</span><span style=" font-family:'sans-serif';"> If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif'; font-weight:600;">NO WARRANTY</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif'; font-weight:600;">15.</span><span style=" font-family:'sans-serif';"> BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY &quot;AS IS&quot; WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif'; font-weight:600;">16.</span><span style=" font-family:'sans-serif';"> IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.</span></p>
<p style=" margin-top:15px; margin-bottom:15px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif'; font-weight:600; color:#333333;">END OF TERMS AND CONDITIONS</span></p>
<p style=" margin-top:15px; margin-bottom:15px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><a name="SEC4"></a><span style=" font-family:'sans-serif'; font-weight:600; color:#333333;">H</span><span style=" font-family:'sans-serif'; font-weight:600; color:#333333;">ow to Apply These Terms to Your New Libraries</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License).</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the &quot;copyright&quot; line and a pointer to where the full notice is found.</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace'; font-style:italic;">one line to give the library's name and an idea of what it does.</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">Copyright (C) </span><span style=" font-family:'monospace'; font-style:italic;">year</span><span style=" font-family:'monospace';"> </span><span style=" font-family:'monospace'; font-style:italic;">name of author</span></p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:'monospace'; font-style:italic;"><br /></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">This library is free software; you can redistribute it and/or</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">modify it under the terms of the GNU Lesser General Public</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">License as published by the Free Software Foundation; either</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">version 2.1 of the License, or (at your option) any later version.</span></p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:'monospace';"><br /></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">This library is distributed in the hope that it will be useful,</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">but WITHOUT ANY WARRANTY; without even the implied warranty of</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">Lesser General Public License for more details.</span></p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:'monospace';"><br /></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">You should have received a copy of the GNU Lesser General Public</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">License along with this library; if not, write to the Free Software</span></p>
<p style=" margin-top:0px; margin-bottom:15px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">Also add information on how to contact you by electronic and paper mail.</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">You should also get your employer (if you work as a programmer) or your school, if any, to sign a &quot;copyright disclaimer&quot; for the library, if necessary. Here is a sample; alter the names:</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">Yoyodyne, Inc., hereby disclaims all copyright interest in</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">the library `Frob' (a library for tweaking knobs) written</span></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">by James Random Hacker.</span></p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:'monospace';"><br /></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace'; font-style:italic;">signature of Ty Coon</span><span style=" font-family:'monospace';">, 1 April 1990</span></p>
<p style=" margin-top:0px; margin-bottom:15px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'monospace';">Ty Coon, President of Vice</span></p>
<p style=" margin-top:19px; margin-bottom:19px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'sans-serif';">That's all there is to it!</span></p></body></html></source>
<translation></translation>
</message>
<message>
<source>Close</source>
<translation>关闭</translation>
</message>
<message>
<source>Version 1.1.1</source>
<translation>版本 1.1.1</translation>
</message>
<message>
<source><!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><img src=":/report/images/logo_100.png" height="100" style="float: left;" /><span style=" font-size:12pt; font-weight:600;">Report engine for </span><span style=" font-size:12pt; font-weight:600; color:#7faa18;">Qt</span><span style=" font-size:12pt; font-weight:600;"> framework</span></p>
<p align="justify" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:11pt;">LimeReport - multi-platform C++ library written using Qt framework and intended for software developers that would like to add into their application capability to form report or print forms generated using templates.</span></p>
<p align="justify" style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:11pt;"><br /></p>
<p align="justify" style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:11pt;"><br /></p>
<p align="justify" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:11pt;">Official web site : </span><a href="www.limereport.ru"><span style=" font-size:11pt; text-decoration: underline; color:#0000ff;">www.limereport.ru</span></a></p>
<p align="justify" style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:11pt; text-decoration: underline; color:#0000ff;"><br /></p>
<p align="justify" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:10pt; font-weight:600;">This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.</span></p>
<p align="justify" style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:10pt; font-weight:600; color:#000000;"><br /></p>
<p align="justify" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:10pt;">Copyright 2021 Arin Alexander. All rights reserved.</span></p></body></html></source>
<translation type="unfinished"><!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:9pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><img src=":/report/images/logo_100.png" height="100" style="float: left;" /><span style=" font-size:12pt; font-weight:600; color:#7faa18;">Qt</span><span style=" font-size:12pt; font-weight:600;">报表引擎</span></p>
<p align="justify" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:11pt;">LimeReport - Qt框架多平台C++库,帮助开发者给应用增加基于模板生成报表及打印报表功能。</span></p>
<p align="justify" style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:11pt;"><br /></p>
<p align="justify" style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:11pt;"><br /></p>
<p align="justify" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:11pt;">官方网站: </span><a href="www.limereport.ru"><span style=" font-size:11pt; text-decoration: underline; color:#0000ff;">www.limereport.ru</span></a></p>
<p align="justify" style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:11pt; text-decoration: underline; color:#0000ff;"><br /></p>
<p align="justify" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:10pt; font-weight:600;">该库基于提供帮助目的发布,但不提供任何担保,不以任何形式提供其适销性或适用于某一特定用途的默示保证。</span></p>
<p align="justify" style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:10pt; font-weight:600; color:#000000;"><br /></p>
<p align="justify" style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:10pt;">版权 2021 Arin Alexander。保留所有权利。</span></p></body></html></translation>
</message>
</context>
<context>
<name>LimeReport::AlignmentPropItem</name>
<message>
<source>Left</source>
<translation>左</translation>
</message>
<message>
<source>Right</source>
<translation>右</translation>
</message>
<message>
<source>Center</source>
<translation>居中</translation>
</message>
<message>
<source>Justify</source>
<translation>两端对齐</translation>
</message>
<message>
<source>Top</source>
<translation>顶</translation>
</message>
<message>
<source>Botom</source>
<translation>底</translation>
</message>
<message>
<source>horizontal</source>
<translation>水平</translation>
</message>
<message>
<source>vertical</source>
<translation>垂直</translation>
</message>
</context>
<context>
<name>LimeReport::BandDesignIntf</name>
<message>
<source>DataBand</source>
<translation>数据带</translation>
</message>
<message>
<source>DataHeaderBand</source>
<translation>数据带头</translation>
</message>
<message>
<source>DataFooterBand</source>
<translation>数据带脚</translation>
</message>
<message>
<source>ReportHeader</source>
<translation>表头</translation>
</message>
<message>
<source>ReportFooter</source>
<translation>表脚</translation>
</message>
<message>
<source>PageHeader</source>
<translation>页眉</translation>
</message>
<message>
<source>PageFooter</source>
<translation>页脚</translation>
</message>
<message>
<source>SubDetailBand</source>
<translation>子细节带</translation>
</message>
<message>
<source>SubDetailHeaderBand</source>
<translation>子细节带头</translation>
</message>
<message>
<source>SubDetailFooterBand</source>
<translation>子细节带脚</translation>
</message>
<message>
<source>GroupBandHeader</source>
<translation>组带头</translation>
</message>
<message>
<source>GroupBandFooter</source>
<translation>组带脚</translation>
</message>
<message>
<source>TearOffBand</source>
<translation>分离带</translation>
</message>
<message>
<source> connected to </source>
<translation> 连接到 </translation>
</message>
<message>
<source>Bring to top</source>
<translation>置顶</translation>
</message>
<message>
<source>Send to back</source>
<translation>置底</translation>
</message>
<message>
<source>Auto height</source>
<translation>自动高度</translation>
</message>
<message>
<source>Splittable</source>
<translation>可拆分</translation>
</message>
<message>
<source>Keep bottom space</source>
<translation>保持底部距离</translation>
</message>
<message>
<source>Cut</source>
<translation>剪切</translation>
</message>
<message>
<source>Copy</source>
<translation>复制</translation>
</message>
<message>
<source>Print if empty</source>
<translation>为空时打印</translation>
</message>
<message>
<source>Keep top space</source>
<translation>保持顶部距离</translation>
</message>
</context>
<context>
<name>LimeReport::BaseDesignIntf</name>
<message>
<source>Copy</source>
<translation>复制</translation>
</message>
<message>
<source>Cut</source>
<translation>剪切</translation>
</message>
<message>
<source>Paste</source>
<translation>粘贴</translation>
</message>
<message>
<source>Bring to top</source>
<translation>置顶</translation>
</message>
<message>
<source>Send to back</source>
<translation>置底</translation>
</message>
<message>
<source>No borders</source>
<translation>无边框</translation>
</message>
<message>
<source>All borders</source>
<translation>所有边框</translation>
</message>
<message>
<source>Create Horizontal Layout</source>
<translation>创建水平布局</translation>
</message>
<message>
<source>Lock item geometry</source>
<translation>锁定组件形状</translation>
</message>
<message>
<source>Create Vertical Layout</source>
<translation>创建垂直布局</translation>
</message>
<message>
<source>Edit borders...</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>LimeReport::BorderFrameEditor</name>
<message>
<source>BorderFrameEditor</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Text</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>LimeReport::ConnectionDesc</name>
<message>
<source>defaultConnection</source>
<translation>默认连接</translation>
</message>
</context>
<context>
<name>LimeReport::ConnectionDialog</name>
<message>
<source>Connection</source>
<translation>数据连接</translation>
</message>
<message>
<source>Connection Name</source>
<translation>连接名称</translation>
</message>
<message>
<source>Use default application connection</source>
<translation>使用默认应用连接</translation>
</message>
<message>
<source>Driver</source>
<translation>驱动</translation>
</message>
<message>
<source>Server </source>
<translation>服务器 </translation>
</message>
<message>
<source>Port</source>
<translation>端口</translation>
</message>
<message>
<source>User</source>
<translation>用户名</translation>
</message>
<message>
<source>Password</source>
<translation>密码</translation>
</message>
<message>
<source>Database</source>
<translation>数据库</translation>
</message>
<message>
<source>...</source>
<translation></translation>
</message>
<message>
<source>Auto connect</source>
<translation>自动连接</translation>
</message>
<message>
<source>Check connection</source>
<translation>检查连接</translation>
</message>
<message>
<source>Cancel</source>
<translation>取消</translation>
</message>
<message>
<source>Ok</source>
<translation>确定</translation>
</message>
<message>
<source>Error</source>
<translation>错误</translation>
</message>
<message>
<source>Connection succsesfully established!</source>
<translation>连接成功建立!</translation>
</message>
<message>
<source>Connection Name is empty</source>
<translation>连接名为空</translation>
</message>
<message>
<source>Connection with name </source>
<translation>连接 </translation>
</message>
<message>
<source> already exists! </source>
<translation> 已经存在! </translation>
</message>
<message>
<source>defaultConnection</source>
<translation>默认连接</translation>
</message>
<message>
<source>Don't keep credentials in lrxml</source>
<translation>不在lrxml文件中保存凭证</translation>
</message>
</context>
<context>
<name>LimeReport::DataBand</name>
<message>
<source>Data</source>
<translation>数据带</translation>
</message>
<message>
<source>Use alternate background color</source>
<translation>使用交替背景色</translation>
</message>
<message>
<source>Keep footer together</source>
<translation>保持页脚</translation>
</message>
<message>
<source>Keep subdetail together</source>
<translation>保持子细节在一起</translation>
</message>
<message>
<source>Slice last row</source>
<translation>分割末行</translation>
</message>
<message>
<source>Start from new page</source>
<translation>从新页开始</translation>
</message>
<message>
<source>Start new page</source>
<translation>开始新页</translation>
</message>
</context>
<context>
<name>LimeReport::DataBrowser</name>
<message>
<source>Datasources</source>
<translation>数据源</translation>
</message>
<message>
<source>Add database connection</source>
<translation>新建数据库连接</translation>
</message>
<message>
<source>...</source>
<translation></translation>
</message>
<message>
<source>Add new datasource</source>
<translation>新建数据源</translation>
</message>
<message>
<source>View data</source>
<translation>查看数据</translation>
</message>
<message>
<source>Change datasource</source>
<translation>编辑数据源</translation>
</message>
<message>
<source>Delete datasource</source>
<translation>删除数据源</translation>
</message>
<message>
<source>Show error</source>
<translation>显示错误</translation>
</message>
<message>
<source>Variables</source>
<translation>变量</translation>
</message>
<message>
<source>Add new variable</source>
<translation>新增变量</translation>
</message>
<message>
<source>Edit variable</source>
<translation>编辑变量</translation>
</message>
<message>
<source>Delete variable</source>
<translation>删除变量</translation>
</message>
<message>
<source>Grab variable</source>
<translation>取得变量</translation>
</message>
<message>
<source>Attention</source>
<translation>注意</translation>
</message>
<message>
<source>Do you really want to delete "%1" connection?</source>
<translation>是否确认删除"%1"连接?</translation>
</message>
<message>
<source>Report variables</source>
<translation>报表变量</translation>
</message>
<message>
<source>System variables</source>
<translation>系统变量</translation>
</message>
<message>
<source>External variables</source>
<translation>外部变量</translation>
</message>
<message>
<source>Do you really want to delete "%1" datasource?</source>
<translation>是否确认删除"%1"数据源?</translation>
</message>
<message>
<source>Do you really want to delete variable "%1"?</source>
<translation>是否确认删除变量"%1"?</translation>
</message>
<message>
<source>Error</source>
<translation>错误</translation>
</message>
</context>
<context>
<name>LimeReport::DataFooterBand</name>
<message>
<source>DataFooter</source>
<translation>数据带脚</translation>
</message>
<message>
<source>Print always</source>
<translation>始终打印</translation>
</message>
</context>
<context>
<name>LimeReport::DataHeaderBand</name>
<message>
<source>DataHeader</source>
<translation>数据带头</translation>
</message>
<message>
<source>Reprint on each page</source>
<translation>每页重新打印</translation>
</message>
<message>
<source>Repeat on each row</source>
<translation>每行重复</translation>
</message>
<message>
<source>Print always</source>
<translation>始终打印</translation>
</message>
</context>
<context>
<name>LimeReport::DataSourceManager</name>
<message>
<source>Connection "%1" is not open</source>
<translation>连接"%1"没有打开</translation>
</message>
<message>
<source>Variable "%1" not found!</source>
<translation>未找到变量"%1"!</translation>
</message>
<message>
<source>Datasource "%1" not found!</source>
<translation>未找到数据源"%1"!</translation>
</message>
<message>
<source>Connection with name "%1" already exists!</source>
<translation>连接 "%1" 已存在!</translation>
</message>
<message>
<source>Datasource with name "%1" already exists!</source>
<translation>数据源 "%1" 已存在!</translation>
</message>
<message>
<source>Database "%1" not found</source>
<translation>未找到数据库 "%1"</translation>
</message>
<message>
<source>invalid connection</source>
<translation>无效连接</translation>
</message>
<message>
<source>Unknown parameter "%1" for variable "%2" found!</source>
<translation>变量"%2"参数"%1"未知!</translation>
</message>
</context>
<context>
<name>LimeReport::DataSourceModel</name>
<message>
<source>Datasources</source>
<translation>数据源</translation>
</message>
<message>
<source>Variables</source>
<translation>变量</translation>
</message>
<message>
<source>External variables</source>
<translation>外部变量</translation>
</message>
</context>
<context>
<name>LimeReport::DialogDesignerManager</name>
<message>
<source>Edit Widgets</source>
<translation>编辑组件</translation>
</message>
<message>
<source>Widget Box</source>
<translation>组件盒</translation>
</message>
<message>
<source>Object Inspector</source>
<translation>对象观察器</translation>
</message>
<message>
<source>Property Editor</source>
<translation>属性编辑器</translation>
</message>
<message>
<source>Signals && Slots Editor</source>
<translation>信号槽编辑器</translation>
</message>
<message>
<source>Resource Editor</source>
<translation>资源编辑器</translation>
</message>
<message>
<source>Action Editor</source>
<translation>动作编辑器</translation>
</message>
</context>
<context>
<name>LimeReport::EnumPropItem</name>
<message>
<source>Default</source>
<translation>默认</translation>
</message>
<message>
<source>Portrait</source>
<translation>纵向</translation>
</message>
<message>
<source>Landscape</source>
<translation>横向</translation>
</message>
<message>
<source>NoneAutoWidth</source>
<translation>无自动宽度</translation>
</message>
<message>
<source>MaxWordLength</source>
<translation>最大词长</translation>
</message>
<message>
<source>MaxStringLength</source>
<translation>最大字符串长</translation>
</message>
<message>
<source>TransparentMode</source>
<translation>透明模式</translation>
</message>
<message>
<source>OpaqueMode</source>
<translation>不透明模式</translation>
</message>
<message>
<source>Angle0</source>
<translation>0度</translation>
</message>
<message>
<source>Angle90</source>
<translation>90度</translation>
</message>
<message>
<source>Angle180</source>
<translation>180度</translation>
</message>
<message>
<source>Angle270</source>
<translation>270度</translation>
</message>
<message>
<source>Angle45</source>
<translation>45度</translation>
</message>
<message>
<source>Angle315</source>
<translation>315度</translation>
</message>
<message>
<source>DateTime</source>
<translation>日期时间</translation>
</message>
<message>
<source>Double</source>
<translation></translation>
</message>
<message>
<source>NoBrush</source>
<translation>无</translation>
</message>
<message>
<source>SolidPattern</source>
<translation>填充</translation>
</message>
<message>
<source>Dense1Pattern</source>
<translation>密集1</translation>
</message>
<message>
<source>Dense2Pattern</source>
<translation>密集2</translation>
</message>
<message>
<source>Dense3Pattern</source>
<translation>密集3</translation>
</message>
<message>
<source>Dense4Pattern</source>
<translation>密集4</translation>
</message>
<message>
<source>Dense5Pattern</source>
<translation>密集5</translation>
</message>
<message>
<source>Dense6Pattern</source>
<translation>密集6</translation>
</message>
<message>
<source>Dense7Pattern</source>
<translation>密集7</translation>
</message>
<message>
<source>HorPattern</source>
<translation>横条纹</translation>
</message>
<message>
<source>VerPattern</source>
<translation>竖条纹</translation>
</message>
<message>
<source>CrossPattern</source>
<translation>交叉条纹</translation>
</message>
<message>
<source>BDiagPattern</source>
<translation>斜条纹</translation>
</message>
<message>
<source>FDiagPattern</source>
<translation>反斜条纹</translation>
</message>
<message>
<source>LeftToRight</source>
<translation>从左到右</translation>
</message>
<message>
<source>RightToLeft</source>
<translation>从右到左</translation>
</message>
<message>
<source>LayoutDirectionAuto</source>
<translation>自动布局方向</translation>
</message>
<message>
<source>LeftItemAlign</source>
<translation>左对齐</translation>
</message>
<message>
<source>RightItemAlign</source>
<translation>右对齐</translation>
</message>
<message>
<source>CenterItemAlign</source>
<translation>居中对齐</translation>
</message>
<message>
<source>ParentWidthItemAlign</source>
<translation>上层宽度对齐</translation>
</message>
<message>
<source>DesignedItemAlign</source>
<translation>保持设计对齐</translation>
</message>
<message>
<source>HorizontalLine</source>
<translation>水平线</translation>
</message>
<message>
<source>VerticalLine</source>
<translation>垂直线</translation>
</message>
<message>
<source>Ellipse</source>
<translation>椭圆</translation>
</message>
<message>
<source>Rectangle</source>
<translation>矩形</translation>
</message>
<message>
<source>Page</source>
<translation>页</translation>
</message>
<message>
<source>Band</source>
<translation>带</translation>
</message>
<message>
<source>Horizontal</source>
<translation>水平</translation>
</message>
<message>
<source>Vertical</source>
<translation>垂直</translation>
</message>
<message>
<source>VerticalUniform</source>
<translation>均匀垂直</translation>
</message>
<message>
<source>Pie</source>
<translation>饼状图</translation>
</message>
<message>
<source>VerticalBar</source>
<translation>柱状图</translation>
</message>
<message>
<source>HorizontalBar</source>
<translation>条形图</translation>
</message>
<message>
<source>LegendAlignTop</source>
<translation>图例靠上对齐</translation>
</message>
<message>
<source>LegendAlignCenter</source>
<translation>图例居中</translation>
</message>
<message>
<source>LegendAlignBottom</source>
<translation>图例靠下对齐</translation>
</message>
<message>
<source>TitleAlignLeft</source>
<translation>标题左对齐</translation>
</message>
<message>
<source>TitleAlignRight</source>
<translation>标题右对齐</translation>
</message>
<message>
<source>TitleAlignCenter</source>
<translation>标题居中</translation>
</message>
<message>
<source>Layout</source>
<translation>布局</translation>
</message>
<message>
<source>Table</source>
<translation>表</translation>
</message>
<message>
<source>Millimeters</source>
<translation>毫米</translation>
</message>
<message>
<source>Inches</source>
<translation>英寸</translation>
</message>
<message>
<source>Scale</source>
<translation>比例</translation>
</message>
<message>
<source>Split</source>
<translation>划分</translation>
</message>
</context>
<context>
<name>LimeReport::FlagsPropItem</name>
<message>
<source>NoLine</source>
<translation>无边框</translation>
</message>
<message>
<source>TopLine</source>
<translation>顶边框</translation>
</message>
<message>
<source>BottomLine</source>
<translation>底边框</translation>
</message>
<message>
<source>LeftLine</source>
<translation>左边框</translation>
</message>
<message>
<source>RightLine</source>
<translation>右边框</translation>
</message>
<message>
<source>AllLines</source>
<translation>所有边框</translation>
</message>
</context>
<context>
<name>LimeReport::FontEditorWidget</name>
<message>
<source>Font bold</source>
<translation>粗体</translation>
</message>
<message>
<source>Font Italic</source>
<translation>斜体</translation>
</message>
<message>
<source>Font Underline</source>
<translation>下划线</translation>
</message>
</context>
<context>
<name>LimeReport::FontPropItem</name>
<message>
<source>bold</source>
<translation>粗体</translation>
</message>
<message>
<source>italic</source>
<translation>斜体</translation>
</message>
<message>
<source>underline</source>
<translation>下划线</translation>
</message>
<message>
<source>size</source>
<translation>字号</translation>
</message>
<message>
<source>family</source>
<translation>系列</translation>
</message>
</context>
<context>
<name>LimeReport::GroupBandFooter</name>
<message>
<source>GroupFooter</source>
<translation>组带脚</translation>
</message>
</context>
<context>
<name>LimeReport::GroupBandHeader</name>
<message>
<source>GroupHeader</source>
<translation>组带头</translation>
</message>
<message>
<source>Group field not found</source>
<translation>未找到组字段</translation>
</message>
<message>
<source>Datasource "%1" not found!</source>
<translation>未找到数据源 "%1"!</translation>
</message>
</context>
<context>
<name>LimeReport::GroupFunction</name>
<message>
<source>Field "%1" not found</source>
<translation>未找到字段 "%1"</translation>
</message>
<message>
<source>Variable "%1" not found</source>
<translation>未找到变量 "%1"</translation>
</message>
<message>
<source>Wrong script syntax "%1" </source>
<translation>脚本语法错误 "%1" </translation>
</message>
<message>
<source>Item "%1" not found</source>
<translation>未找到目标项 "%1"</translation>
</message>
</context>
<context>
<name>LimeReport::ImageItem</name>
<message>
<source>Image</source>
<translation>图像</translation>
</message>
<message>
<source>Watermark</source>
<translation>水印</translation>
</message>
<message>
<source>Ext.</source>
<translation>扩展名.</translation>
</message>
<message>
<source>Edit</source>
<translation>编辑</translation>
</message>
<message>
<source>Images (*.gif *.icns *.ico *.jpeg *.tga *.tiff *.wbmp *.webp *.png *.jpg *.bmp);;All(*.*)</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>LimeReport::ItemLocationPropItem</name>
<message>
<source>Band</source>
<translation>带</translation>
</message>
<message>
<source>Page</source>
<translation>页</translation>
</message>
</context>
<context>
<name>LimeReport::ItemsAlignmentEditorWidget</name>
<message>
<source>Bring to top</source>
<translation>置顶</translation>
</message>
<message>
<source>Send to back</source>
<translation>置底</translation>
</message>
<message>
<source>Align to left</source>
<translation>左对齐</translation>
</message>
<message>
<source>Align to right</source>
<translation>右对齐</translation>
</message>
<message>
<source>Align to vertical center</source>
<translation>垂直居中</translation>
</message>
<message>
<source>Align to top</source>
<translation>顶部对齐</translation>
</message>
<message>
<source>Align to bottom</source>
<translation>底部对齐</translation>
</message>
<message>
<source>Align to horizontal center</source>
<translation>水平居中</translation>
</message>
<message>
<source>Set same height</source>
<translation>相同高度</translation>
</message>
<message>
<source>Set same width</source>
<translation>相同宽度</translation>
</message>
</context>
<context>
<name>LimeReport::ItemsBordersEditorWidget</name>
<message>
<source>Top line</source>
<translation>顶边框</translation>
</message>
<message>
<source>Bottom line</source>
<translation>底边框</translation>
</message>
<message>
<source>Left line</source>
<translation>左边框</translation>
</message>
<message>
<source>Right line</source>
<translation>右边框</translation>
</message>
<message>
<source>No borders</source>
<translation>无边框</translation>
</message>
<message>
<source>All borders</source>
<translation>所有边框</translation>
</message>
<message>
<source>Edit border</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>LimeReport::MasterDetailProxyModel</name>
<message>
<source>Field: "%1" not found in "%2" child datasource</source>
<translation>从数据源 "%2" 中未找到字段: "%1"</translation>
</message>
<message>
<source>Field: "%1" not found in "%2" master datasource</source>
<translation>主数据源 "%2" 中未找到字段: "%1"</translation>
</message>
</context>
<context>
<name>LimeReport::ModelToDataSource</name>
<message>
<source>model is destroyed</source>
<translation>数据模型已销毁</translation>
</message>
</context>
<context>
<name>LimeReport::ObjectBrowser</name>
<message>
<source>Objects</source>
<translation>对象</translation>
</message>
</context>
<context>
<name>LimeReport::ObjectInspectorWidget</name>
<message>
<source>Clear</source>
<translation>清除</translation>
</message>
<message>
<source>Filter</source>
<translation>筛选</translation>
</message>
<message>
<source>Translate properties</source>
<translation>翻译属性</translation>
</message>
</context>
<context>
<name>LimeReport::PDFExporter</name>
<message>
<source>Export to PDF</source>
<translation>导出为PDF文件</translation>
</message>
</context>
<context>
<name>LimeReport::PageEditor</name>
<message>
<source>Page setup</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Paper</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Format</source>
<translation type="unfinished">格式</translation>
</message>
<message>
<source>Dimension</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Width:</source>
<translation type="unfinished"></translation>
</message>
<message>
<source> mm</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Height:</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Orientation</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Portrait</source>
<translation type="unfinished">纵向</translation>
</message>
<message>
<source>Landscape</source>
<translation type="unfinished">横向</translation>
</message>
<message>
<source>Margins</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Bottom:</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Top:</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Right:</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Left:</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Drop printer margins</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Other</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Height options</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Endless Height</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Extended Height:</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Full page</source>
<translation type="unfinished">全页</translation>
</message>
</context>
<context>
<name>LimeReport::PageFooter</name>
<message>
<source>Page Footer</source>
<translation>页脚</translation>
</message>
<message>
<source>Print on first page</source>
<translation>首页时打印</translation>
</message>
<message>
<source>Print on last page</source>
<translation>末页时打印</translation>
</message>
</context>
<context>
<name>LimeReport::PageHeader</name>
<message>
<source>Page Header</source>
<translation>页眉</translation>
</message>
</context>
<context>
<name>LimeReport::PageItemDesignIntf</name>
<message>
<source>Paste</source>
<translation>粘贴</translation>
</message>
<message>
<source>Page is TOC</source>
<translation>目录页面</translation>
</message>
<message>
<source>Reset page number</source>
<translation>重置页码</translation>
</message>
<message>
<source>Full page</source>
<translation>全页</translation>
</message>
<message>
<source>Set page size to printer</source>
<translation>适合打印机纸张大小</translation>
</message>
<message>
<source>Mix with prior page</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Edit</source>
<translation type="unfinished">编辑</translation>
</message>
</context>
<context>
<name>LimeReport::PreviewReportWidget</name>
<message>
<source>Form</source>
<translation>表单</translation>
</message>
<message>
<source>Report file name</source>
<translation>报表文件名</translation>
</message>
<message>
<source>%1 file name</source>
<translation>文件名 %1</translation>
</message>
</context>
<context>
<name>LimeReport::PreviewReportWindow</name>
<message>
<source>Preview</source>
<translation>预览</translation>
</message>
<message>
<source>View</source>
<translation>查看</translation>
</message>
<message>
<source>Report</source>
<translation>报表</translation>
</message>
<message>
<source>Print</source>
<translation>打印</translation>
</message>
<message>
<source>Ctrl+P</source>
<translation></translation>
</message>
<message>
<source>Zoom In</source>
<translation>放大</translation>
</message>
<message>
<source>Zoom Out</source>
<translation>缩小</translation>
</message>
<message>
<source>Prior Page</source>
<translation>上一页</translation>
</message>
<message>
<source>Next Page</source>
<translation>下一页</translation>
</message>
<message>
<source>Close Preview</source>
<translation>关闭预览</translation>
</message>
<message>
<source>Esc</source>
<translation></translation>
</message>
<message>
<source>Edit Mode</source>
<translation>编辑模式</translation>
</message>
<message>
<source>Save to file</source>
<translation>保存</translation>
</message>
<message>
<source>Show errors</source>
<translation>显示错误</translation>
</message>
<message>
<source>First Page</source>
<translation>首页</translation>
</message>
<message>
<source>First page</source>
<translation>首页</translation>
</message>
<message>
<source>Last Page</source>
<translation>末页</translation>
</message>
<message>
<source>Print To PDF</source>
<translation>打印到PDF文件</translation>
</message>
<message>
<source>Fit page width</source>
<translation>适合页宽</translation>
</message>
<message>
<source>Fit page</source>
<translation>适合页高</translation>
</message>
<message>
<source>One to one</source>
<translation>原始尺寸</translation>
</message>
<message>
<source>Show Toolbar</source>
<translation>显示工具栏</translation>
</message>
<message>
<source>Show toolbar</source>
<translation>显示工具栏</translation>
</message>
<message>
<source>Page: </source>
<translation>页数: </translation>
</message>
<message>
<source>Font</source>
<translation>字体</translation>
</message>
<message>
<source>Text align</source>
<translation>文本对齐</translation>
</message>
<message>
<source> of %1</source>
<translation> / %1</translation>
</message>
<message>
<source>InsertTextItem</source>
<translation>插入文本组件</translation>
</message>
<message>
<source>Add new TextItem</source>
<translation>新增文本组件</translation>
</message>
<message>
<source>Selection Mode</source>
<translation>选择模式</translation>
</message>
<message>
<source>Delete Item</source>
<translation>删除组件</translation>
</message>
<message>
<source>Del</source>
<translation></translation>
</message>
<message>
<source>MainToolBar</source>
<translation>工具条</translation>
</message>
<message>
<source>EditModeTools</source>
<translation>编辑工具</translation>
</message>
<message>
<source>Printing</source>
<translation>正在打印</translation>
</message>
<message>
<source>Attention</source>
<translation>注意</translation>
</message>
<message>
<source>The printing is in process</source>
<translation>正在打印中</translation>
</message>
</context>
<context>
<name>LimeReport::ProxyHolder</name>
<message>
<source>Datasource has been invalidated</source>
<translation>数据源已失效</translation>
</message>
</context>
<context>
<name>LimeReport::QObjectPropertyModel</name>
<message>
<source>leftMargin</source>
<translation>左边距</translation>
</message>
<message>
<source>rightMargin</source>
<translation>右边距</translation>
</message>
<message>
<source>topMargin</source>
<translation>顶边距</translation>
</message>
<message>
<source>bottomMargin</source>
<translation>底边距</translation>
</message>
<message>
<source>objectName</source>
<translation>对象名称</translation>
</message>
<message>
<source>borders</source>
<translation>边框</translation>
</message>
<message>
<source>geometry</source>
<translation>形状</translation>
</message>
<message>
<source>itemAlign</source>
<translation>对齐方式</translation>
</message>
<message>
<source>pageOrientation</source>
<translation>页面布局</translation>
</message>
<message>
<source>pageSize</source>
<translation>页面规格</translation>
</message>
<message>
<source>TopLine</source>
<translation>顶边框</translation>
</message>
<message>
<source>BottomLine</source>
<translation>底边框</translation>
</message>
<message>
<source>LeftLine</source>
<translation>左边框</translation>
</message>
<message>
<source>RightLine</source>
<translation>右边框</translation>
</message>
<message>
<source>reprintOnEachPage</source>
<translation>重新打印每页</translation>
</message>
<message>
<source>borderLineSize</source>
<translation>边框线宽</translation>
</message>
<message>
<source>autoHeight</source>
<translation>自动高度</translation>
</message>
<message>
<source>backgroundColor</source>
<translation>背景颜色</translation>
</message>
<message>
<source>columnCount</source>
<translation>列数</translation>
</message>
<message>
<source>columnsFillDirection</source>
<translation>列填充方向</translation>
</message>
<message>
<source>datasource</source>
<translation>数据源</translation>
</message>
<message>
<source>keepBottomSpace</source>
<translation>保持底部空间</translation>
</message>
<message>
<source>keepFooterTogether</source>
<translation>保持页脚</translation>
</message>
<message>
<source>keepSubdetailTogether</source>
<translation>保持子细节脚</translation>
</message>
<message>
<source>printIfEmpty</source>
<translation>为空时打印</translation>
</message>
<message>
<source>sliceLastRow</source>
<translation>分割末行</translation>
</message>
<message>
<source>splittable</source>
<translation>可拆分</translation>
</message>
<message>
<source>alignment</source>
<translation>对齐</translation>
</message>
<message>
<source>angle</source>
<translation>角度</translation>
</message>
<message>
<source>autoWidth</source>
<translation>自动宽度</translation>
</message>
<message>
<source>backgroundMode</source>
<translation>背景模式</translation>
</message>
<message>
<source>backgroundOpacity</source>
<translation>背景不透明度</translation>
</message>
<message>
<source>content</source>
<translation>内容</translation>
</message>
<message>
<source>font</source>
<translation>字体</translation>
</message>
<message>
<source>fontColor</source>
<translation>字体颜色</translation>
</message>
<message>
<source>foregroundOpacity</source>
<translation>前景不透明度</translation>
</message>
<message>
<source>itemLocation</source>
<translation>组件位置</translation>
</message>
<message>
<source>margin</source>
<translation>边距</translation>
</message>
<message>
<source>stretchToMaxHeight</source>
<translation>拉伸到最大高度</translation>
</message>
<message>
<source>trimValue</source>
<translation>裁剪值</translation>
</message>
<message>
<source>lineWidth</source>
<translation>线宽</translation>
</message>
<message>
<source>opacity</source>
<translation>不透明度</translation>
</message>
<message>
<source>penStyle</source>
<translation>画笔样式</translation>
</message>
<message>
<source>shape</source>
<translation>形状</translation>
</message>
<message>
<source>shapeBrush</source>
<translation>画刷</translation>
</message>
<message>
<source>shapeBrushColor</source>
<translation>画刷颜色</translation>
</message>
<message>
<source>gridStep</source>
<translation>栅格长</translation>
</message>
<message>
<source>fullPage</source>
<translation>全页</translation>
</message>
<message>
<source>oldPrintMode</source>
<translation>旧打印模式</translation>
</message>
<message>
<source>borderColor</source>
<translation>边框颜色</translation>
</message>
<message>
<source>resetPageNumber</source>
<translation>重置页号</translation>
</message>
<message>
<source>alternateBackgroundColor</source>
<translation>变更背景色</translation>
</message>
<message>
<source>backgroundBrushStyle</source>
<translation>背景画刷样式</translation>
</message>
<message>
<source>startFromNewPage</source>
<translation>从新页开始</translation>
</message>
<message>
<source>startNewPage</source>
<translation>开始新页</translation>
</message>
<message>
<source>adaptFontToSize</source>
<translation>字体适应字号</translation>
</message>
<message>
<source>allowHTML</source>
<translation>允许HTML</translation>
</message>
<message>
<source>allowHTMLInFields</source>
<translation>允许字段HTML</translation>
</message>
<message>
<source>followTo</source>
<translation>跟随</translation>
</message>
<message>
<source>format</source>
<translation>格式</translation>
</message>
<message>
<source>lineSpacing</source>
<translation>行距</translation>
</message>
<message>
<source>textIndent</source>
<translation>文本缩进</translation>
</message>
<message>
<source>textLayoutDirection</source>
<translation>文本布局方向</translation>
</message>
<message>
<source>underlineLineSize</source>
<translation>下划线宽</translation>
</message>
<message>
<source>underlines</source>
<translation>下划线</translation>
</message>
<message>
<source>valueType</source>
<translation>值类型</translation>
</message>
<message>
<source>securityLevel</source>
<translation>安全级别</translation>
</message>
<message>
<source>testValue</source>
<translation>测试值</translation>
</message>
<message>
<source>whitespace</source>
<translation>空格</translation>
</message>
<message>
<source>resourcePath</source>
<translation>资源路径</translation>
</message>
<message>
<source>scale</source>
<translation>比例</translation>
</message>
<message>
<source>cornerRadius</source>
<translation>圆角半径</translation>
</message>
<message>
<source>shapeColor</source>
<translation>颜色</translation>
</message>
<message>
<source>layoutType</source>
<translation>布局类型</translation>
</message>
<message>
<source>barcodeType</source>
<translation>条码类型</translation>
</message>
<message>
<source>barcodeWidth</source>
<translation>条码宽度</translation>
</message>
<message>
<source>foregroundColor</source>
<translation>颜色</translation>
</message>
<message>
<source>inputMode</source>
<translation>输入模式</translation>
</message>
<message>
<source>pdf417CodeWords</source>
<translation>PDF417码</translation>
</message>
<message>
<source>autoSize</source>
<translation>自动大小</translation>
</message>
<message>
<source>center</source>
<translation>居中</translation>
</message>
<message>
<source>field</source>
<translation>字段</translation>
</message>
<message>
<source>image</source>
<translation>图像</translation>
</message>
<message>
<source>keepAspectRatio</source>
<translation>保持比例</translation>
</message>
<message>
<source>columnsCount</source>
<translation>列数</translation>
</message>
<message>
<source>useAlternateBackgroundColor</source>
<translation>使用变更背景色</translation>
</message>
<message>
<source>printBeforePageHeader</source>
<translation>页眉前打印</translation>
</message>
<message>
<source>maxScalePercent</source>
<translation>最大缩放百分比</translation>
</message>
<message>
<source>printOnFirstPage</source>
<translation>打印到首页</translation>
</message>
<message>
<source>printOnLastPage</source>
<translation>打印到尾页</translation>
</message>
<message>
<source>printAlways</source>
<translation>始终打印</translation>
</message>
<message>
<source>repeatOnEachRow</source>
<translation>每行重复</translation>
</message>
<message>
<source>condition</source>
<translation>条件</translation>
</message>
<message>
<source>groupFieldName</source>
<translation>组字段名</translation>
</message>
<message>
<source>keepGroupTogether</source>
<translation>保持组脚</translation>
</message>
<message>
<source>Property Name</source>
<translation>属性名</translation>
</message>
<message>
<source>Property value</source>
<translation>属性值</translation>
</message>
<message>
<source>Warning</source>
<translation>警告</translation>
</message>
<message>
<source>endlessHeight</source>
<translation>无限高度</translation>
</message>
<message>
<source>extendedHeight</source>
<translation>扩展高度</translation>
</message>
<message>
<source>isExtendedInDesignMode</source>
<translation>设计模式扩展</translation>
</message>
<message>
<source>pageIsTOC</source>
<translation>目录页面</translation>
</message>
<message>
<source>setPageSizeToPrinter</source>
<translation>适合打印机纸张</translation>
</message>
<message>
<source>fillInSecondPass</source>
<translation>二次填充</translation>
</message>
<message>
<source>chartTitle</source>
<translation>图表标题</translation>
</message>
<message>
<source>chartType</source>
<translation>图表类型</translation>
</message>
<message>
<source>drawLegendBorder</source>
<translation>显示图例边框</translation>
</message>
<message>
<source>labelsField</source>
<translation>标签字段</translation>
</message>
<message>
<source>legendAlign</source>
<translation>图例对齐</translation>
</message>
<message>
<source>series</source>
<translation>数据系列</translation>
</message>
<message>
<source>titleAlign</source>
<translation>标题对齐</translation>
</message>
<message>
<source>watermark</source>
<translation>水印</translation>
</message>
<message>
<source>keepTopSpace</source>
<translation>保持顶部距离</translation>
</message>
<message>
<source>printable</source>
<translation>可打印</translation>
</message>
<message>
<source>variable</source>
<translation>变量</translation>
</message>
<message>
<source>replaceCRwithBR</source>
<translation>替换回车换行</translation>
</message>
<message>
<source>hideIfEmpty</source>
<translation>为空时隐藏</translation>
</message>
<message>
<source>hideEmptyItems</source>
<translation>隐藏空组件</translation>
</message>
<message>
<source>useExternalPainter</source>
<translation>使用外部绘图</translation>
</message>
<message>
<source>layoutSpacing</source>
<translation>布局间距</translation>
</message>
<message>
<source>printerName</source>
<translation>打印机名称</translation>
</message>
<message>
<source>fontLetterSpacing</source>
<translation>字母间距</translation>
</message>
<message>
<source>hideText</source>
<translation>隐藏文本</translation>
</message>
<message>
<source>option3</source>
<translation>选项3</translation>
</message>
<message>
<source>units</source>
<translation>单位</translation>
</message>
<message>
<source>geometryLocked</source>
<translation>形状锁定</translation>
</message>
<message>
<source>printBehavior</source>
<translation>打印行为</translation>
</message>
<message>
<source>shiftItems</source>
<translation>偏移组件</translation>
</message>
<message>
<source>showLegend</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>removeGap</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>xAxisField</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>seriesLineWidth</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>drawPoints</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>dropPrinterMargins</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>notPrintIfEmpty</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>gridChartLines</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>horizontalAxisOnTop</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>mixWithPriorPage</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>LimeReport::RectPropItem</name>
<message>
<source>width</source>
<translation>宽</translation>
</message>
<message>
<source>height</source>
<translation>高</translation>
</message>
</context>
<context>
<name>LimeReport::RectUnitPropItem</name>
<message>
<source>width</source>
<translation>宽</translation>
</message>
<message>
<source>height</source>
<translation>高</translation>
</message>
</context>
<context>
<name>LimeReport::ReportDesignWidget</name>
<message>
<source>Script</source>
<translation>脚本</translation>
</message>
<message>
<source>Report file name</source>
<translation>报表文件名</translation>
</message>
<message>
<source>Error</source>
<translation>错误</translation>
</message>
<message>
<source>Wrong file format</source>
<translation>文件格式错误</translation>
</message>
<message>
<source>Translations</source>
<translation>翻译</translation>
</message>
</context>
<context>
<name>LimeReport::ReportDesignWindow</name>
<message>
<source>New Report</source>
<translation>新建报表</translation>
</message>
<message>
<source>New Report Page</source>
<translation>新建页</translation>
</message>
<message>
<source>Delete Report Page</source>
<translation>删除页</translation>
</message>
<message>
<source>Edit Mode</source>
<translation>编辑模式</translation>
</message>
<message>
<source>Undo</source>
<translation>撤销</translation>
</message>
<message>
<source>Redo</source>
<translation>重做</translation>
</message>
<message>
<source>Copy</source>
<translation>复制</translation>
</message>
<message>
<source>Paste</source>
<translation>粘贴</translation>
</message>
<message>
<source>Cut</source>
<translation>剪切</translation>
</message>
<message>
<source>Settings</source>
<translation>设置</translation>
</message>
<message>
<source>Use grid</source>
<translation>使用栅格</translation>
</message>
<message>
<source>Use magnet</source>
<translation>使用磁力</translation>
</message>
<message>
<source>Text Item</source>
<translation>文本组件</translation>
</message>
<message>
<source>Save Report</source>
<translation>保存报表</translation>
</message>
<message>
<source>Save Report As</source>
<translation>另存为</translation>
</message>
<message>
<source>Load Report</source>
<translation>读取报表</translation>
</message>
<message>
<source>Delete item</source>
<translation>删除组件</translation>
</message>
<message>
<source>Zoom In</source>
<translation>放大</translation>
</message>
<message>
<source>Zoom Out</source>
<translation>缩小</translation>
</message>
<message>
<source>Render Report</source>
<translation>生成报表</translation>
</message>
<message>
<source>Edit layouts mode</source>
<translation>编辑布局模式</translation>
</message>
<message>
<source>Horizontal layout</source>
<translation>水平布局</translation>
</message>
<message>
<source>About</source>
<translation>关于</translation>
</message>
<message>
<source>Hide left panel | Alt+L</source>
<translation>隐藏左面板 | Alt+L</translation>
</message>
<message>
<source>Hide right panel | Alt+R</source>
<translation>隐藏右面板 | Alt+R</translation>
</message>
<message>
<source>Report Tools</source>
<translation>报表工具</translation>
</message>
<message>
<source>Main Tools</source>
<translation>工具</translation>
</message>
<message>
<source>Font</source>
<translation>字体</translation>
</message>
<message>
<source>Text alignment</source>
<translation>文本对齐</translation>
</message>
<message>
<source>Items alignment</source>
<translation>组件对齐</translation>
</message>
<message>
<source>Borders</source>
<translation>边框</translation>
</message>
<message>
<source>Report bands</source>
<translation>报表带</translation>
</message>
<message>
<source>Report Header</source>
<translation>表头</translation>
</message>
<message>
<source>Report Footer</source>
<translation>表脚</translation>
</message>
<message>
<source>Page Header</source>
<translation>页眉</translation>
</message>
<message>
<source>Page Footer</source>
<translation>页脚</translation>
</message>
<message>
<source>Data</source>
<translation>数据带</translation>
</message>
<message>
<source>Data Header</source>
<translation>数据带头</translation>
</message>
<message>
<source>Data Footer</source>
<translation>数据带脚</translation>
</message>
<message>
<source>SubDetail</source>
<translation>子细节带</translation>
</message>
<message>
<source>SubDetailHeader</source>
<translation>子细节带头</translation>
</message>
<message>
<source>SubDetailFooter</source>
<translation>子细节带脚</translation>
</message>
<message>
<source>GroupHeader</source>
<translation>组带头</translation>
</message>
<message>
<source>GroupFooter</source>
<translation>组带脚</translation>
</message>
<message>
<source>Tear-off Band</source>
<translation>分离带</translation>
</message>
<message>
<source>File</source>
<translation>文件</translation>
</message>
<message>
<source>Edit</source>
<translation>编辑</translation>
</message>
<message>
<source>Info</source>
<translation>信息</translation>
</message>
<message>
<source>Recent Files</source>
<translation>最近打开文件</translation>
</message>
<message>
<source>Object Inspector</source>
<translation>对象观察器</translation>
</message>
<message>
<source>Report structure</source>
<translation>报表结构</translation>
</message>
<message>
<source>Data Browser</source>
<translation>数据浏览器</translation>
</message>
<message>
<source>Script Browser</source>
<translation>脚本浏览器</translation>
</message>
<message>
<source>Report has been modified! Do you want save the report?</source>
<translation>报表已修改! 是否保存?</translation>
</message>
<message>
<source>Report file name</source>
<translation>报表文件名</translation>
</message>
<message>
<source>Warning</source>
<translation>警告</translation>
</message>
<message>
<source>File "%1" not found!</source>
<translation>未找到文件 "%1"!</translation>
</message>
<message>
<source>Delete dialog</source>
<translation>删除对话框</translation>
</message>
<message>
<source>Add new dialog</source>
<translation>新增对话框</translation>
</message>
<message>
<source>Widget Box</source>
<translation>组件盒</translation>
</message>
<message>
<source>Property Editor</source>
<translation>属性编辑器</translation>
</message>
<message>
<source>Action Editor</source>
<translation>动作编辑器</translation>
</message>
<message>
<source>Resource Editor</source>
<translation>资源编辑器</translation>
</message>
<message>
<source>SignalSlot Editor</source>
<translation>信号槽编辑器</translation>
</message>
<message>
<source>Dialog Designer Tools</source>
<translation>对话框设计工具</translation>
</message>
<message>
<source>Vertical layout</source>
<translation>垂直布局</translation>
</message>
<message>
<source>Lock selected items</source>
<translation>锁定选定组件</translation>
</message>
<message>
<source>Unlock selected items</source>
<translation>解锁选定组件</translation>
</message>
<message>
<source>Select one level items</source>
<translation>选择一级组件</translation>
</message>
<message>
<source>Rendered %1 pages</source>
<translation>已处理%1页</translation>
</message>
<message>
<source>Cancel report rendering</source>
<translation>取消报表生成</translation>
</message>
<message>
<source>Attention</source>
<translation>注意</translation>
</message>
<message>
<source>The rendering is in process</source>
<translation>正在处理中</translation>
</message>
</context>
<context>
<name>LimeReport::ReportEnginePrivate</name>
<message>
<source>Preview</source>
<translation>预览</translation>
</message>
<message>
<source>Error</source>
<translation>错误</translation>
</message>
<message>
<source>Report File Change</source>
<translation>报表文件改变</translation>
</message>
<message>
<source>The report file "%1" has changed names or been deleted.
This preview is no longer valid.</source>
<translation>报表文件 "%1" 重命名或删除。
预览已无效。</translation>
</message>
<message>
<source>Designer not found!</source>
<translation>设计器未找到!</translation>
</message>
<message>
<source>Language %1 already exists</source>
<translation>语言 %1 已存在</translation>
</message>
<message>
<source>%1 file name</source>
<translation>文件名 %1</translation>
</message>
</context>
<context>
<name>LimeReport::ReportFooter</name>
<message>
<source>Report Footer</source>
<translation>表脚</translation>
</message>
</context>
<context>
<name>LimeReport::ReportHeader</name>
<message>
<source>Report Header</source>
<translation>表头</translation>
</message>
</context>
<context>
<name>LimeReport::ReportRender</name>
<message>
<source>Error</source>
<translation>错误</translation>
</message>
<message>
<source>page index out of range</source>
<translation>页索引越界</translation>
</message>
<message>
<source>Databand "%1" not found</source>
<translation>未找到数据带 "%1"</translation>
</message>
<message>
<source>Wrong using function %1</source>
<translation>函数 %1 使用错误</translation>
</message>
</context>
<context>
<name>LimeReport::SQLEditDialog</name>
<message>
<source>Datasource</source>
<translation>数据源</translation>
</message>
<message>
<source>Connection</source>
<translation>数据连接</translation>
</message>
<message>
<source>Datasource Name</source>
<translation>数据源名</translation>
</message>
<message>
<source>Subdetail</source>
<translation>子细节</translation>
</message>
<message>
<source>Master datasource</source>
<translation>主数据源</translation>
</message>
<message>
<source>Subquery mode</source>
<translation>子查询模式</translation>
</message>
<message>
<source>Filter mode</source>
<translation>筛选模式</translation>
</message>
<message>
<source>SQL</source>
<translation></translation>
</message>
<message>
<source>Preview</source>
<translation>预览</translation>
</message>
<message>
<source>Hide Preview</source>
<translation>隐藏预览</translation>
</message>
<message>
<source>Child datasource</source>
<translation>子数据源</translation>
</message>
<message>
<source>Fields map</source>
<translation>字段映射</translation>
</message>
<message>
<source>...</source>
<translation></translation>
</message>
<message>
<source>Data preview</source>
<translation>数据预览</translation>
</message>
<message>
<source>Cancel</source>
<translation>取消</translation>
</message>
<message>
<source>Ok</source>
<translation>确定</translation>
</message>
<message>
<source>Error</source>
<translation>错误</translation>
</message>
<message>
<source>Datasource Name is empty!</source>
<translation>数据源名为空!</translation>
</message>
<message>
<source>SQL is empty!</source>
<translation>SQL语句为空!</translation>
</message>
<message>
<source>Datasource with name: "%1" already exists!</source>
<translation>数据源 "%1" 已存在!</translation>
</message>
<message>
<source>defaultConnection</source>
<translation>默认连接</translation>
</message>
<message>
<source>Datasource with name %1 already exist</source>
<translation>数据源 "%1" 已存在</translation>
</message>
<message>
<source>Attention</source>
<translation>注意</translation>
</message>
<message>
<source>Connection is not specified</source>
<translation>未指定连接</translation>
</message>
<message>
<source>Refresh</source>
<translation>刷新</translation>
</message>
<message>
<source>CSV</source>
<translation>CSV</translation>
</message>
<message>
<source>Separator</source>
<translation>分隔符</translation>
</message>
<message>
<source>;</source>
<translation>;</translation>
</message>
<message>
<source>Use first row as header</source>
<translation>第一行为头</translation>
</message>
</context>
<context>
<name>LimeReport::SVGItem</name>
<message>
<source>SVG Image</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>SVG (*.svg)</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Edit</source>
<translation type="unfinished">编辑</translation>
</message>
<message>
<source>Watermark</source>
<translation type="unfinished">水印</translation>
</message>
</context>
<context>
<name>LimeReport::ScriptBrowser</name>
<message>
<source>Form</source>
<translation>表单</translation>
</message>
<message>
<source>Functions</source>
<translation>函数</translation>
</message>
<message>
<source>...</source>
<translation></translation>
</message>
<message>
<source>Dialogs</source>
<translation>对话框</translation>
</message>
<message>
<source>Type</source>
<translation>类型</translation>
</message>
<message>
<source>Name</source>
<translation>名称</translation>
</message>
<message>
<source>NO CATEGORY</source>
<translation>无类别</translation>
</message>
<message>
<source>Error</source>
<translation>错误</translation>
</message>
<message>
<source>Dialog with name: %1 already exists</source>
<translation>对话框 %1 已存在</translation>
</message>
<message>
<source>ui file must cointain QDialog instead QWidget or QMainWindow</source>
<translation>ui 文件必须包含 QDialog 而不是 QWidget 或 QMainWindow</translation>
</message>
<message>
<source>wrong file format</source>
<translation>文件格式错误</translation>
</message>
</context>
<context>
<name>LimeReport::ScriptEditor</name>
<message>
<source>Form</source>
<translation>表单</translation>
</message>
<message>
<source>Data</source>
<translation>数据</translation>
</message>
<message>
<source>Functions</source>
<translation>函数</translation>
</message>
</context>
<context>
<name>LimeReport::ScriptEngineContext</name>
<message>
<source>Dialog with name: %1 can`t be created</source>
<translation>无法创建对话框 %1</translation>
</message>
<message>
<source>Error</source>
<translation>错误</translation>
</message>
</context>
<context>
<name>LimeReport::ScriptEngineManager</name>
<message>
<source>GROUP FUNCTIONS</source>
<translation>组函数</translation>
</message>
<message>
<source>Value</source>
<translation>值</translation>
</message>
<message>
<source>BandName</source>
<translation>带名称</translation>
</message>
<message>
<source>Variable %1 not found</source>
<translation>未找到变量 %1</translation>
</message>
<message>
<source>SYSTEM</source>
<translation>系统</translation>
</message>
<message>
<source>NUMBER</source>
<translation>数字</translation>
</message>
<message>
<source>Format</source>
<translation>格式</translation>
</message>
<message>
<source>Precision</source>
<translation>精度</translation>
</message>
<message>
<source>Locale</source>
<translation>区域</translation>
</message>
<message>
<source>DATE&amp;TIME</source>
<translation>日期&amp;时间</translation>
</message>
<message>
<source>CurrencySymbol</source>
<translation>货币符号</translation>
</message>
<message>
<source>GENERAL</source>
<translation>通用</translation>
</message>
<message>
<source>Name</source>
<translation>名称</translation>
</message>
<message>
<source>Function manager with name "%1" already exists!</source>
<translation>函数管理器 %1 已存在!</translation>
</message>
<message>
<source>FieldName</source>
<translation>字段名</translation>
</message>
<message>
<source>Field %1 not found in %2!</source>
<translation>在 %2 中找不到字段 %1 !</translation>
</message>
<message>
<source>Datasource</source>
<translation>数据源</translation>
</message>
<message>
<source>ValueField</source>
<translation>值字段</translation>
</message>
<message>
<source>KeyField</source>
<translation>键名字段</translation>
</message>
<message>
<source>KeyFieldValue</source>
<translation>键字段值</translation>
</message>
<message>
<source>Unique identifier</source>
<translation>唯一标识符</translation>
</message>
<message>
<source>Content</source>
<translation>内容</translation>
</message>
<message>
<source>Indent</source>
<translation>缩进</translation>
</message>
<message>
<source>datasourceName</source>
<translation>数据源名称</translation>
</message>
<message>
<source>RowIndex</source>
<translation>行索引</translation>
</message>
</context>
<context>
<name>LimeReport::SettingDialog</name>
<message>
<source>Designer setting</source>
<translation>设计器设置</translation>
</message>
<message>
<source>Default font</source>
<translation>默认字体</translation>
</message>
<message>
<source>Grid</source>
<translation>栅格</translation>
</message>
<message>
<source>Vertical grid step</source>
<translation>竖栅格</translation>
</message>
<message>
<source>Horizontal grid step</source>
<translation>横栅格</translation>
</message>
<message>
<source>Suppress absent fields and variables warning</source>
<translation>抑制缺失字段及变量警告</translation>
</message>
<message>
<source>Language</source>
<translation>语言</translation>
</message>
<message>
<source>Designer settings</source>
<translation>设计器设置</translation>
</message>
<message>
<source>Theme</source>
<translation>主题</translation>
</message>
<message>
<source>Report units</source>
<translation>报表单位</translation>
</message>
<message>
<source>Script editor settings</source>
<translation>脚本编辑器设置</translation>
</message>
<message>
<source>Font</source>
<translation>字体</translation>
</message>
<message>
<source>Indent size</source>
<translation>缩进尺寸</translation>
</message>
<message>
<source>Report settings</source>
<translation>报表设置</translation>
</message>
</context>
<context>
<name>LimeReport::SubDetailBand</name>
<message>
<source>SubDetail</source>
<translation>子细节</translation>
</message>
</context>
<context>
<name>LimeReport::SubDetailHeaderBand</name>
<message>
<source>SubDetailHeader</source>
<translation>子细节头</translation>
</message>
</context>
<context>
<name>LimeReport::SvgEditor</name>
<message>
<source>Select image file</source>
<translation type="unfinished">选择图像文件</translation>
</message>
</context>
<context>
<name>LimeReport::TearOffBand</name>
<message>
<source>Tear-off Band</source>
<translation>分离带</translation>
</message>
</context>
<context>
<name>LimeReport::TextAlignmentEditorWidget</name>
<message>
<source>Text align left</source>
<translation>文本左对齐</translation>
</message>
<message>
<source>Text align center</source>
<translation>文本居中对齐</translation>
</message>
<message>
<source>Text align right</source>
<translation>文本右对齐</translation>
</message>
<message>
<source>Text align justify</source>
<translation>文本行对齐</translation>
</message>
<message>
<source>Text align top</source>
<translation>文本顶部对齐</translation>
</message>
<message>
<source>Text align bottom</source>
<translation>文本底部对齐</translation>
</message>
</context>
<context>
<name>LimeReport::TextItem</name>
<message>
<source>Edit</source>
<translation>编辑</translation>
</message>
<message>
<source>Auto height</source>
<translation>自动高度</translation>
</message>
<message>
<source>Allow HTML</source>
<translation>允许HTML</translation>
</message>
<message>
<source>Allow HTML in fields</source>
<translation>允许字段HTML</translation>
</message>
<message>
<source>Stretch to max height</source>
<translation>拉伸至最大高度</translation>
</message>
<message>
<source>Error</source>
<translation>错误</translation>
</message>
<message>
<source>TextItem " %1 " already has folower " %2 " </source>
<translation>文本框 "%1 " 已有 "%2 " </translation>
</message>
<message>
<source>TextItem " %1 " not found!</source>
<translation>未找到文本框 "%1"!</translation>
</message>
<message>
<source>Transparent</source>
<translation>透明</translation>
</message>
<message>
<source>Watermark</source>
<translation>水印</translation>
</message>
<message>
<source>Hide if empty</source>
<translation>为空时隐藏</translation>
</message>
</context>
<context>
<name>LimeReport::TextItemEditor</name>
<message>
<source>Text Item Editor</source>
<translation>文本编辑器</translation>
</message>
<message>
<source>Content</source>
<translation>内容</translation>
</message>
<message>
<source>Ok</source>
<translation>确定</translation>
</message>
<message>
<source>Ctrl+Return</source>
<translation></translation>
</message>
<message>
<source>Cancel</source>
<translation>取消</translation>
</message>
</context>
<context>
<name>LimeReport::TranslationEditor</name>
<message>
<source>Form</source>
<translation>表单</translation>
</message>
<message>
<source>Languages</source>
<translation>语言</translation>
</message>
<message>
<source>...</source>
<translation>...</translation>
</message>
<message>
<source>Pages</source>
<translation>页</translation>
</message>
<message>
<source>Strings</source>
<translation>字符串</translation>
</message>
<message>
<source>Source Text</source>
<translation>源文</translation>
</message>
<message>
<source>Translation</source>
<translation>译文</translation>
</message>
<message>
<source>Checked</source>
<translation>选中</translation>
</message>
<message>
<source>Report Item</source>
<translation>报表组件</translation>
</message>
<message>
<source>Property</source>
<translation>属性</translation>
</message>
<message>
<source>Source text</source>
<translation>源文</translation>
</message>
</context>
<context>
<name>LimeReport::VariablesHolder</name>
<message>
<source>variable with name </source>
<translation>变量 </translation>
</message>
<message>
<source> already exists!</source>
<translation> 已存在!</translation>
</message>
<message>
<source> does not exists!</source>
<translation> 不存在!</translation>
</message>
</context>
<context>
<name>QObject</name>
<message>
<source>Data</source>
<translation>数据带</translation>
</message>
<message>
<source>DataHeader</source>
<translation>数据带头</translation>
</message>
<message>
<source>DataFooter</source>
<translation>数据带脚</translation>
</message>
<message>
<source>GroupHeader</source>
<translation>组带头</translation>
</message>
<message>
<source>GroupFooter</source>
<translation>组带脚</translation>
</message>
<message>
<source>Page Footer</source>
<translation>页脚</translation>
</message>
<message>
<source>Page Header</source>
<translation>页眉</translation>
</message>
<message>
<source>Report Footer</source>
<translation>表脚</translation>
</message>
<message>
<source>Report Header</source>
<translation>表头</translation>
</message>
<message>
<source>SubDetail</source>
<translation>子细节</translation>
</message>
<message>
<source>SubDetailHeader</source>
<translation>子细节头</translation>
</message>
<message>
<source>SubDetailFooter</source>
<translation>子细节带脚</translation>
</message>
<message>
<source>Tear-off Band</source>
<translation>分离带</translation>
</message>
<message>
<source>alignment</source>
<translation>对齐</translation>
</message>
<message>
<source>Barcode Item</source>
<translation>条码组件</translation>
</message>
<message>
<source>HLayout</source>
<translation>水平布局</translation>
</message>
<message>
<source>Image Item</source>
<translation>图像组件</translation>
</message>
<message>
<source>Shape Item</source>
<translation>图形组件</translation>
</message>
<message>
<source>itemLocation</source>
<translation>组件位置</translation>
</message>
<message>
<source>Text Item</source>
<translation>文本组件</translation>
</message>
<message>
<source>Invalid connection! %1</source>
<translation>无效连接 %1</translation>
</message>
<message>
<source>Master datasource "%1" not found!</source>
<translation>未找到主数据源 "%1"!</translation>
</message>
<message>
<source>Master datasouce "%1" not found!</source>
<translation>未找到主数据源 "%1"!</translation>
</message>
<message>
<source>Child</source>
<translation>子</translation>
</message>
<message>
<source> and child </source>
<translation> 子数据源 </translation>
</message>
<message>
<source>datasouce "%1" not found!</source>
<translation>未找到子数据源"%1"!</translation>
</message>
<message>
<source>bool</source>
<translation></translation>
</message>
<message>
<source>QColor</source>
<translation></translation>
</message>
<message>
<source>content</source>
<translation>内容</translation>
</message>
<message>
<source>datasource</source>
<translation>数据源</translation>
</message>
<message>
<source>field</source>
<translation>字段</translation>
</message>
<message>
<source>enum</source>
<translation></translation>
</message>
<message>
<source>flags</source>
<translation></translation>
</message>
<message>
<source>QFont</source>
<translation></translation>
</message>
<message>
<source>QImage</source>
<translation></translation>
</message>
<message>
<source>int</source>
<translation></translation>
</message>
<message>
<source>qreal</source>
<translation></translation>
</message>
<message>
<source>QRect</source>
<translation></translation>
</message>
<message>
<source>QRectF</source>
<translation></translation>
</message>
<message>
<source>geometry</source>
<translation>形状</translation>
</message>
<message>
<source>QString</source>
<translation></translation>
</message>
<message>
<source>Attention!</source>
<translation>注意!</translation>
</message>
<message>
<source>Selected elements have different parent containers</source>
<translation>选中元素有不同的容器</translation>
</message>
<message>
<source>Object with name %1 already exists!</source>
<translation>对象 %1 已存在!</translation>
</message>
<message>
<source>Function %1 not found or have wrong arguments</source>
<translation>未找到函数 %1 或参数错误</translation>
</message>
<message>
<source>mm</source>
<translation>毫米</translation>
</message>
<message>
<source>Wrong file format</source>
<translation>文件格式错误</translation>
</message>
<message>
<source>File %1 not opened</source>
<translation>无法打开文件 %1</translation>
</message>
<message>
<source>Content string is empty</source>
<translation>字符串为空</translation>
</message>
<message>
<source>Content is empty</source>
<translation>内容为空</translation>
</message>
<message>
<source>Chart Item</source>
<translation>图表组件</translation>
</message>
<message>
<source>First</source>
<translation>第一</translation>
</message>
<message>
<source>Second</source>
<translation>第二</translation>
</message>
<message>
<source>Thrid</source>
<translation>第三</translation>
</message>
<message>
<source>Datasource manager not found</source>
<translation>数据源管理器未找到</translation>
</message>
<message>
<source>Export to PDF</source>
<translation>导出为PDF文件</translation>
</message>
<message>
<source>VLayout</source>
<translation>垂直布局</translation>
</message>
<message>
<source>Dark</source>
<translation>暗</translation>
</message>
<message>
<source>Light</source>
<translation>亮</translation>
</message>
<message>
<source>Default</source>
<translation>默认</translation>
</message>
<message>
<source>Millimeters</source>
<translation>毫米</translation>
</message>
<message>
<source>Inches</source>
<translation>英寸</translation>
</message>
<message>
<source>margin</source>
<translation>边距</translation>
</message>
<message>
<source>''</source>
<translation>''</translation>
</message>
<message>
<source>SVG Item</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>image</source>
<translation type="unfinished">图像</translation>
</message>
<message>
<source>series</source>
<translation type="unfinished">数据系列</translation>
</message>
<message>
<source>Series</source>
<translation type="unfinished">数据系列</translation>
</message>
<message>
<source>X Axis</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Y Axis</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>X axis</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Y axis</source>
<translation type="unfinished"></translation>
</message>
<message>
<source>Axis</source>
<translation type="unfinished"></translation>
</message>
</context>
</TS> | PypiClean |
/IntelliCoder-0.5.2.tar.gz/IntelliCoder-0.5.2/intellicoder/msbuild/builders.py | from __future__ import division, absolute_import, print_function
from logging import getLogger
import os
from itertools import chain
from subprocess import CalledProcessError
from ..init import _
from ..utils import replace_ext
from .locators import VSPath, VCPath, SDKPath
logging = getLogger(__name__)
class Builder(object):
"""
Represent a builder.
"""
def __init__(self):
self.vs = VSPath()
self.sdk = SDKPath(self.vs.sdk_dir, self.vs.sdk_version)
self.vc = VCPath(self.vs.vc_dir, self.sdk)
def build(self, filenames, cl_args=None, link_args=None,
x64=False, out_dir=''):
"""
Compile source files and link object files.
"""
if not cl_args:
cl_args = []
if not link_args:
link_args = []
msvc, lib = self.vc.get_bin_and_lib(x64)
lib = self.make_lib(lib)
if out_dir:
cl_args.append('/Fo:' + out_dir + '\\')
include = self.make_inc(self.vc.inc + self.sdk.inc)
cl_args.extend(include + filenames)
try:
msvc.run_cl('/c', *cl_args)
except CalledProcessError as error:
logging.error(_('failed to compile: %s'), filenames)
logging.error(_('cl.exe returned:\n%s'), error.output)
return False
link_args.extend(lib + self.make_objs(filenames, out_dir))
try:
msvc.run_link(*link_args)
except CalledProcessError as error:
logging.error(_('failed to link: %s'), filenames)
logging.error(_('link.exe returned:\n%s'), error.output)
return False
return True
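    # A minimal usage sketch (hypothetical file names and flags; assumes a local
    # Visual Studio / Windows SDK install that VSPath, VCPath and SDKPath can
    # locate at runtime):
    #     builder = Builder()
    #     builder.build(['payload.c'], cl_args=['/O2'],
    #                   link_args=['/out:payload.exe'], x64=True, out_dir='obj')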
def native_build(self, filenames, cl_args=None, link_args=None,
x64=False, out_dir=''):
"""
Compile source files and link object files
to native binaries.
"""
if not cl_args:
cl_args = []
if not link_args:
link_args = []
cl_args.append('/D_AMD64_' if x64 else '/D_X86_')
link_args.extend(
['/driver', '/entry:DriverEntry',
'/subsystem:native', '/defaultlib:ntoskrnl'])
msvc, lib = self.vc.get_bin_and_lib(x64, native=True)
lib = self.make_lib(lib)
if out_dir:
cl_args.append('/Fo:' + out_dir + '\\')
inc = self.make_inc(
self.sdk.inc + self.sdk.get_inc(native=True)
)
cl_args.extend(filenames + inc)
try:
msvc.run_cl('/c', *cl_args)
except CalledProcessError as error:
logging.error(_('failed to compile: %s'), filenames)
logging.error(_('cl.exe returned:\n%s'), error.output)
return False
link_args.extend(lib + self.make_objs(filenames, out_dir))
try:
msvc.run_link(*link_args)
except CalledProcessError as error:
logging.error(_('failed to link: %s'), filenames)
logging.error(_('link.exe returned:\n%s'), error.output)
return False
return True
@staticmethod
def make_inc(incs):
"""
Make include directory for link.exe.
"""
inc_args = [['/I', inc] for inc in incs]
return list(chain.from_iterable(inc_args))
@staticmethod
def make_lib(libs):
"""
Make lib directory for link.exe.
"""
lib_args = ['/libpath:' + lib for lib in libs]
return lib_args
@staticmethod
def make_objs(names, out_dir=''):
"""
Make object file names for cl.exe and link.exe.
"""
objs = [replace_ext(name, '.obj') for name in names]
if out_dir:
objs = [os.path.join(out_dir, obj) for obj in objs]
return objs
def local_build(native, *args, **kwargs):
"""
Compile source files and link object files.
"""
method = 'native_build' if native else 'build'
logging.debug(_('build type: %s'), method)
return getattr(Builder(), method)(*args, **kwargs) | PypiClean |
/EnergyCapSdk-8.2304.4743.tar.gz/EnergyCapSdk-8.2304.4743/energycap/sdk/models/bill_history_response.py |
from msrest.serialization import Model
class BillHistoryResponse(Model):
"""BillHistoryResponse.
:param bill_id: The bill's bill id
:type bill_id: int
:param billing_period: The bill's billing period
:type billing_period: int
:param begin_date: The bill's begin date
:type begin_date: datetime
:param end_date: The bill's end date
:type end_date: datetime
:param created_date: The bill's created date
:type created_date: datetime
:param total_cost: The bill's total cost
:type total_cost: float
:param total_cost_unit:
:type total_cost_unit: ~energycap.sdk.models.UnitChild
:param void: The bill's void indicator
:type void: bool
:param accrual: The bill's accrual indicator
:type accrual: bool
:param invoice_number: The bill's invoice number
:type invoice_number: str
:param estimated: Indicates if the bill is estimated or not
:type estimated: bool
:param due_date: The bill's due date
:type due_date: datetime
:param statement_date: The bill's statement date
:type statement_date: datetime
:param bill_account_meters: The bill's account-meter summaries
:type bill_account_meters:
list[~energycap.sdk.models.BillAccountMeterChild]
"""
_attribute_map = {
'bill_id': {'key': 'billId', 'type': 'int'},
'billing_period': {'key': 'billingPeriod', 'type': 'int'},
'begin_date': {'key': 'beginDate', 'type': 'iso-8601'},
'end_date': {'key': 'endDate', 'type': 'iso-8601'},
'created_date': {'key': 'createdDate', 'type': 'iso-8601'},
'total_cost': {'key': 'totalCost', 'type': 'float'},
'total_cost_unit': {'key': 'totalCostUnit', 'type': 'UnitChild'},
'void': {'key': 'void', 'type': 'bool'},
'accrual': {'key': 'accrual', 'type': 'bool'},
'invoice_number': {'key': 'invoiceNumber', 'type': 'str'},
'estimated': {'key': 'estimated', 'type': 'bool'},
'due_date': {'key': 'dueDate', 'type': 'iso-8601'},
'statement_date': {'key': 'statementDate', 'type': 'iso-8601'},
'bill_account_meters': {'key': 'billAccountMeters', 'type': '[BillAccountMeterChild]'},
}
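    # Illustrative construction (hypothetical values): the generated __init__
    # below accepts every documented field as a keyword argument, while
    # _attribute_map drives msrest (de)serialization to the JSON key names.
    #     bill = BillHistoryResponse(bill_id=42, billing_period=202304,
    #                                total_cost=1234.56, void=False)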
def __init__(self, **kwargs):
super(BillHistoryResponse, self).__init__(**kwargs)
self.bill_id = kwargs.get('bill_id', None)
self.billing_period = kwargs.get('billing_period', None)
self.begin_date = kwargs.get('begin_date', None)
self.end_date = kwargs.get('end_date', None)
self.created_date = kwargs.get('created_date', None)
self.total_cost = kwargs.get('total_cost', None)
self.total_cost_unit = kwargs.get('total_cost_unit', None)
self.void = kwargs.get('void', None)
self.accrual = kwargs.get('accrual', None)
self.invoice_number = kwargs.get('invoice_number', None)
self.estimated = kwargs.get('estimated', None)
self.due_date = kwargs.get('due_date', None)
self.statement_date = kwargs.get('statement_date', None)
self.bill_account_meters = kwargs.get('bill_account_meters', None) | PypiClean |
/NCParse-1.0.1.tar.gz/NCParse-1.0.1/src/gcode.py | from operator import truediv
import re
from typing import Type
from src.gcodesegment import GCodeSegment
# extract letter-digit pairs
g_pattern = re.compile('([A-Z])([-+]?[0-9.]+)')
# white spaces and comments start with ';' and in '()'
clean_pattern = re.compile('\(.*?\)|;.*')
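# For example (hypothetical input), clean_pattern strips '(comments)' and ';...'
# trailers, and g_pattern.findall('G1 X10. Y-2.5') yields
# [('G', '1'), ('X', '10.'), ('Y', '-2.5')].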
class GCode(object):
# represents a single line of GCode
def __init__(self, segments, raw_line):
self.segments = []
self.parse_segments(segments)
self.raw_line = raw_line
def parse_segments(self, segments):
for i in range(0, len(segments)):
try:
x = None
y = None
z = None
m = g_pattern.findall(segments[i])
for j in range(0, len(m)):
if m[j][0] != 'X' and m[j][0] != 'Y' and m[j][0] != 'Z':
#1) see if the next part of this command are dimensions
#2) if not, just add the segment normally
if len(segments) > 1:
for k in range(i + 1, len(segments)):
if segments[k][0].startswith('X') or segments[k][0].startswith('Y') or segments[k][0].startswith('Z'):
m2 = g_pattern.findall(segments[k])
if m2[0][0] == 'X':
x = m2[0][1]
elif m2[0][0] == 'Y':
y = m2[0][1]
elif m2[0][0] == 'Z':
z = m2[0][1]
else:
break
self.segments.append(GCodeSegment(m[0][0],m[0][1], x, y, z, segments[j]))
except TypeError as e:
                print(f'ERROR PARSING SEGMENT {segments[i]} :: {e}')
@staticmethod
def parse_line(line):
line = re.sub(clean_pattern, '', line)
segments = line.split()
gCode = GCode(segments, line)
return gCode | PypiClean |
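# --- Illustrative usage sketch (not part of the original module) ---
# The G-code line below is a made-up example; parse_line() strips the comment
# and splits the rest into GCodeSegment objects as implemented above.
if __name__ == "__main__":
    example = GCode.parse_line("G01 X1.5 Y-2.0 Z0.25 ; example move")
    print(example.raw_line)
    print(len(example.segments))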
/NlpToolkit-Math-1.0.18.tar.gz/NlpToolkit-Math-1.0.18/Math/Matrix.py | from __future__ import annotations
import copy
import random
import math
from Math.MatrixNotPositiveDefinite import MatrixNotPositiveDefinite
from Math.MatrixNotSquare import MatrixNotSquare
from Math.Eigenvector import Eigenvector
from Math.MatrixDimensionMismatch import MatrixDimensionMismatch
from Math.Vector import Vector
from Math.MatrixRowMismatch import MatrixRowMismatch
from Math.DeterminantZero import DeterminantZero
from Math.MatrixRowColumnMismatch import MatrixRowColumnMismatch
from Math.MatrixColumnMismatch import MatrixColumnMismatch
from Math.MatrixNotSymmetric import MatrixNotSymmetric
class Matrix(object):
__row: int
__col: int
__values: list
def __init__(self,
row,
col=None,
minValue=None,
maxValue=None,
seed=None):
"""
Constructor of Matrix class which takes row and column numbers (Vectors) as inputs.
PARAMETERS
----------
row : int (or Vector)
is used to create matrix.
col : int (or Vector)
is used to create matrix.
minValue : float
minimum Value for the initialization
maxValue : float
maximum Value for the initialization
seed : int
seed for the random
"""
if isinstance(row, int):
self.__row = row
if col is not None:
self.__col = col
if minValue is None:
self.initZeros()
elif maxValue is None:
self.initZeros()
for i in range(self.__row):
self.__values[i][i] = minValue
else:
random.seed(seed)
self.__values = [[random.uniform(minValue, maxValue) for _ in range(self.__col)] for _ in
range(self.__row)]
else:
self.__col = row
self.initZeros()
for i in range(self.__row):
self.__values[i][i] = 1.0
elif isinstance(row, Vector) and isinstance(col, Vector):
self.__row = row.size()
self.__col = col.size()
self.initZeros()
for i in range(row.size()):
for j in range(col.size()):
self.__values[i][j] = row.getValue(i) * col.getValue(j)
def initZeros(self):
self.__values = [[0 for _ in range(self.__col)] for _ in range(self.__row)]
def clone(self) -> Matrix:
return copy.deepcopy(self)
def getValue(self,
rowNo: int,
colNo: int) -> float:
"""
The getter for the index at given rowNo and colNo of values list.
PARAMETERS
----------
rowNo : int
integer input for row number.
colNo : int
integer input for column number.
RETURNS
-------
double
item at given index of values list.
"""
return self.__values[rowNo][colNo]
def setValue(self,
rowNo: int,
colNo: int,
value: float):
"""
The setter for the value at given index of values list.
PARAMETERS
----------
rowNo : int
integer input for row number.
colNo : int
integer input for column number.
value : double
is used to set at given index.
"""
self.__values[rowNo][colNo] = value
def addValue(self,
rowNo: int,
colNo: int,
value: float):
"""
The addValue method adds the given value to the item at given index of values list.
PARAMETERS
----------
rowNo : int
integer input for row number.
colNo : int
integer input for column number.
value : double
is used to add to given item at given index.
"""
self.__values[rowNo][colNo] += value
def increment(self,
rowNo: int,
colNo: int):
"""
The increment method adds 1 to the item at given index of values list.
PARAMETERS
----------
rowNo : int
integer input for row number.
colNo : int
integer input for column number.
"""
self.__values[rowNo][colNo] += 1
def getRow(self) -> int:
"""
The getter for the row variable.
RETURNS
-------
int
row number.
"""
return self.__row
def getRowVector(self, row: int) -> Vector:
"""
The getRowVector method returns the vector of values list at given row input.
PARAMETERS
----------
row : int
row integer input for row number.
RETURNS
-------
Vector
Vector of values list at given row input.
"""
rowList = self.__values[row]
rowVector = Vector(rowList)
return rowVector
def getColumn(self) -> int:
"""
The getter for the col variable.
RETURNS
-------
int
column number.
"""
return self.__col
def getColumnVector(self, column: int) -> list:
"""
        The getColumnVector method creates a list and adds the items at the given column number of the
        values list to it.
        PARAMETERS
        ----------
        column : int
            column integer input for column number.
        RETURNS
        -------
        list
            list of the items at the given column number.
"""
columnVector = []
for i in range(self.__row):
columnVector.append(self.__values[i][column])
return columnVector
def columnWiseNormalize(self):
"""
        The columnWiseNormalize method first sums the items of each row, then divides every item
        in that row by the row total.
"""
for i in range(self.__row):
total = sum(self.__values[i])
self.__values[i][:] = [x / total for x in self.__values[i]]
def multiplyWithConstant(self, constant: float):
"""
The multiplyWithConstant method takes a constant as an input and multiplies each item of values list
with given constant.
PARAMETERS
----------
constant : double
constant value to multiply items of values list.
"""
for i in range(self.__row):
self.__values[i][:] = [x * constant for x in self.__values[i]]
def divideByConstant(self, constant: float):
"""
The divideByConstant method takes a constant as an input and divides each item of values list
with given constant.
PARAMETERS
----------
constant : double
constant value to divide items of values list.
"""
for i in range(self.__row):
self.__values[i][:] = [x / constant for x in self.__values[i]]
def add(self, m: Matrix):
"""
The add method takes a Matrix as an input and accumulates values list with the
corresponding items of given Matrix. If the sizes of both Matrix and values list do not match,
it throws MatrixDimensionMismatch exception.
PARAMETERS
----------
m : Matrix
Matrix type input.
"""
if self.__row != m.__row or self.__col != m.__col:
raise MatrixDimensionMismatch
for i in range(self.__row):
for j in range(self.__col):
self.__values[i][j] += m.__values[i][j]
def addRowVector(self,
rowNo: int,
v: Vector):
"""
The add method which takes a row number and a Vector as inputs. It sums up the corresponding values at the given
row of values list and given Vector. If the sizes of both Matrix and values list do not match, it throws
MatrixColumnMismatch exception.
PARAMETERS
----------
rowNo : int
integer input for row number.
v : Vector
Vector type input.
"""
if self.__col != v.size():
raise MatrixColumnMismatch
for i in range(self.__col):
self.__values[rowNo][i] += v.getValue(i)
def subtract(self, m: Matrix):
"""
The subtract method takes a Matrix as an input and subtracts from values list the
corresponding items of given Matrix. If the sizes of both Matrix and values list do not match,
it throws {@link MatrixDimensionMismatch} exception.
PARAMETERS
----------
m : Matrix
Matrix type input.
"""
if self.__row != m.__row or self.__col != m.__col:
raise MatrixDimensionMismatch
for i in range(self.__row):
for j in range(self.__col):
self.__values[i][j] -= m.__values[i][j]
def multiplyWithVectorFromLeft(self, v: Vector) -> Vector:
"""
The multiplyWithVectorFromLeft method takes a Vector as an input and creates a result list.
Then, multiplies values of input Vector starting from the left side with the values list,
accumulates the multiplication, and assigns to the result list. If the sizes of both Vector
and row number do not match, it throws MatrixRowMismatch exception.
PARAMETERS
----------
v : Vector
Vector type input.
RETURNS
-------
Vector
Vector that holds the result.
"""
if self.__row != v.size():
raise MatrixRowMismatch
result = Vector()
for i in range(self.__col):
total = 0.0
for j in range(self.__row):
total += v.getValue(j) * self.__values[j][i]
result.add(total)
return result
def multiplyWithVectorFromRight(self, v: Vector) -> Vector:
"""
The multiplyWithVectorFromRight method takes a Vector as an input and creates a result list.
Then, multiplies values of input Vector starting from the right side with the values list,
accumulates the multiplication, and assigns to the result list. If the sizes of both Vector
and row number do not match, it throws MatrixColumnMismatch exception.
PARAMETERS
----------
v : Vector
Vector type input.
RETURNS
-------
Vector
Vector that holds the result.
"""
if self.__col != v.size():
raise MatrixColumnMismatch
result = Vector()
for i in range(self.__row):
total = 0.0
for j in range(self.__col):
total += v.getValue(j) * self.__values[i][j]
result.add(total)
return result
def columnSum(self, columnNo: int) -> float:
"""
The columnSum method takes a column number as an input and accumulates items at given column number of values
list.
PARAMETERS
----------
columnNo : int
Column number input.
RETURNS
-------
double
summation of given column of values list.
"""
total = 0
for i in range(self.__row):
total += self.__values[i][columnNo]
return total
def sumOfRows(self) -> Vector:
"""
        The sumOfRows method creates a new result Vector and adds the result of the columnSum method for each
        column to the newly created result Vector.
RETURNS
-------
Vector
Vector that holds column sum.
"""
result = Vector()
for i in range(self.__col):
result.add(self.columnSum(i))
return result
def rowSum(self, rowNo: int) -> float:
"""
The rowSum method takes a row number as an input and accumulates items at given row number of values list.
        PARAMETERS
        ----------
        rowNo : int
            Row number input.
        RETURNS
        -------
        float
            summation of given row of values list.
"""
return sum(self.__values[rowNo])
def multiply(self, m: Matrix) -> Matrix:
"""
The multiply method takes a Matrix as an input. First it creates a result Matrix and puts the
        accumulated multiplication of values list and given Matrix into result
Matrix. If the size of Matrix's row size and values list's column size do not match,
it throws MatrixRowColumnMismatch exception.
PARAMETERS
----------
m : Matrix
Matrix type input.
RETURNS
-------
Matrix
result Matrix.
"""
if self.__col != m.__row:
raise MatrixRowColumnMismatch
result = Matrix(self.__row, m.__col)
for i in range(self.__row):
for j in range(m.__col):
total = 0.0
for k in range(self.__col):
total += self.__values[i][k] * m.__values[k][j]
result.__values[i][j] = total
return result
def elementProduct(self, m: Matrix) -> Matrix:
"""
The elementProduct method takes a Matrix as an input and performs element wise multiplication. Puts result
to the newly created Matrix. If the size of Matrix's row and column size does not match with the values
list's row and column size, it throws MatrixDimensionMismatch exception.
PARAMETERS
----------
m : Matrix
Matrix type input.
RETURNS
-------
Matrix
result Matrix.
"""
if self.__row != m.__row or self.__col != m.__col:
raise MatrixDimensionMismatch
result = Matrix(self.__row, self.__col)
for i in range(self.__row):
for j in range(self.__col):
result.__values[i][j] = self.__values[i][j] * m.__values[i][j]
return result
def sumOfElements(self) -> float:
"""
The sumOfElements method accumulates all the items in values list and
returns this summation.
RETURNS
-------
float
sum of the items of values list.
"""
total = 0.0
for i in range(self.__row):
total += sum(self.__values[i])
return total
def trace(self) -> float:
"""
The trace method accumulates items of values list at the diagonal.
RETURNS
-------
float
sum of items at diagonal.
"""
if self.__row != self.__col:
raise MatrixNotSquare
total = 0.0
for i in range(self.__row):
total += self.__values[i][i]
return total
def transpose(self) -> Matrix:
"""
The transpose method creates a new Matrix, then takes the transpose of values list
and puts transposition to the Matrix.
RETURNS
-------
Matrix
Matrix type output.
"""
result = Matrix(self.__col, self.__row)
for i in range(self.__row):
for j in range(self.__col):
result.__values[j][i] = self.__values[i][j]
return result
def partial(self,
rowStart: int,
rowEnd: int,
colStart: int,
colEnd: int) -> Matrix:
"""
The partial method takes 4 integer inputs; rowStart, rowEnd, colStart, colEnd and creates a Matrix size of
rowEnd - rowStart + 1 x colEnd - colStart + 1. Then, puts corresponding items of values list
to the new result Matrix.
PARAMETERS
----------
rowStart : int
integer input for defining starting index of row.
rowEnd : int
integer input for defining ending index of row.
colStart : int
integer input for defining starting index of column.
colEnd : int
integer input for defining ending index of column.
RETURNS
-------
Matrix
result Matrix.
"""
result = Matrix(rowEnd - rowStart + 1, colEnd - colStart + 1)
for i in range(rowStart, rowEnd + 1):
for j in range(colStart, colEnd + 1):
result.__values[i - rowStart][j - colStart] = self.__values[i][j]
return result
def isSymmetric(self) -> bool:
"""
The isSymmetric method compares each item of values list at positions (i, j) with (j, i)
and returns true if they are equal, false otherwise.
RETURNS
-------
bool
true if items are equal, false otherwise.
"""
if self.__row != self.__col:
raise MatrixNotSquare
for i in range(self.__row - 1):
for j in range(self.__row):
if self.__values[i][j] != self.__values[j][i]:
return False
return True
def determinant(self) -> float:
"""
The determinant method first creates a new list, and copies the items of values
list into new list. Then, calculates the determinant of this
new list.
RETURNS
-------
float
determinant of values list.
"""
if self.__row != self.__col:
raise MatrixNotSquare
det = 1.0
copyOfMatrix = copy.deepcopy(self)
for i in range(self.__row):
det *= copyOfMatrix.__values[i][i]
if det == 0.0:
break
for j in range(i + 1, self.__row):
ratio = copyOfMatrix.__values[j][i] / copyOfMatrix.__values[i][i]
for k in range(i, self.__col):
copyOfMatrix.__values[j][k] = copyOfMatrix.__values[j][k] - copyOfMatrix.__values[i][k] * ratio
return det
def inverse(self):
"""
The inverse method finds the inverse of values list.
"""
if self.__row != self.__col:
raise MatrixNotSquare
b = Matrix(self.__row, self.__row)
indxc = []
indxr = []
ipiv = []
for j in range(self.__row):
ipiv.append(0)
for i in range(1, self.__row + 1):
big = 0.0
irow = -1
icol = -1
for j in range(1, self.__row + 1):
if ipiv[j - 1] != 1:
for k in range(1, self.__row + 1):
if ipiv[k - 1] == 0:
if abs(self.__values[j - 1][k - 1]) >= big:
big = abs(self.__values[j - 1][k - 1])
irow = j
icol = k
if irow == -1 or icol == -1:
raise DeterminantZero
ipiv[icol - 1] = ipiv[icol - 1] + 1
if irow != icol:
for l in range(1, self.__row + 1):
dum = self.__values[irow - 1][l - 1]
self.__values[irow - 1][l - 1] = self.__values[icol - 1][l - 1]
self.__values[icol - 1][l - 1] = dum
for l in range(1, self.__row + 1):
dum = b.__values[irow - 1][l - 1]
b.__values[irow - 1][l - 1] = b.__values[icol - 1][l - 1]
b.__values[icol - 1][l - 1] = dum
indxr.append(irow)
indxc.append(icol)
if self.__values[icol - 1][icol - 1] == 0:
raise DeterminantZero
pivinv = 1.0 / self.__values[icol - 1][icol - 1]
self.__values[icol - 1][icol - 1] = 1.0
for l in range(1, self.__row + 1):
self.__values[icol - 1][l - 1] = self.__values[icol - 1][l - 1] * pivinv
for l in range(1, self.__row + 1):
b.__values[icol - 1][l - 1] = b.__values[icol - 1][l - 1] * pivinv
for ll in range(1, self.__row + 1):
if ll != icol:
dum = self.__values[ll - 1][icol - 1]
self.__values[ll - 1][icol - 1] = 0.0
for l in range(1, self.__row + 1):
self.__values[ll - 1][l - 1] = self.__values[ll - 1][l - 1] - self.__values[icol - 1][
l - 1] * dum
for l in range(1, self.__row + 1):
b.__values[ll - 1][l - 1] = b.__values[ll - 1][l - 1] - b.__values[icol - 1][l - 1] * dum
for l in range(self.__row, 0, -1):
if indxr[l - 1] != indxc[l - 1]:
for k in range(1, self.__row + 1):
dum = self.__values[k - 1][indxr[l - 1] - 1]
self.__values[k - 1][indxr[l - 1] - 1] = self.__values[k - 1][indxc[l - 1] - 1]
self.__values[k - 1][indxc[l - 1] - 1] = dum
def choleskyDecomposition(self) -> Matrix:
"""
The choleskyDecomposition method creates a new Matrix and puts the Cholesky Decomposition of values Array
into this Matrix. Also, it throws MatrixNotSymmetric exception if it is not symmetric and
MatrixNotPositiveDefinite exception if the summation is negative.
RETURNS
-------
Matrix
Matrix type output.
"""
if not self.isSymmetric():
raise MatrixNotSymmetric
b = Matrix(self.__row, self.__col)
for i in range(self.__row):
for j in range(i, self.__row):
total = self.__values[i][j]
for k in range(i - 1, -1, -1):
total -= self.__values[i][k] * self.__values[j][k]
if i == j:
if total <= 0.0:
raise MatrixNotPositiveDefinite
b.__values[i][i] = math.sqrt(total)
else:
b.__values[j][i] = total / b.__values[i][i]
return b
def __rotate(self,
s: float,
tau: float,
i: int,
j: int,
k: int,
l: int):
"""
The rotate method rotates values list according to given inputs.
PARAMETERS
----------
s : double
double input.
tau : double
double input.
i : int
integer input.
j : int
integer input.
k : int
integer input.
l : int
integer input.
"""
g = self.__values[i][j]
h = self.__values[k][l]
self.__values[i][j] = g - s * (h + g * tau)
self.__values[k][l] = h + s * (g - h * tau)
def characteristics(self) -> list:
"""
        The characteristics method finds and returns a sorted list of Eigenvectors. It throws a
MatrixNotSymmetric exception if it is not symmetric.
RETURNS
-------
list
A sorted list of Eigenvectors.
"""
if not self.isSymmetric():
raise MatrixNotSymmetric
matrix1 = copy.deepcopy(self)
v = Matrix(self.__row, self.__row, 1.0)
d = []
b = []
z = []
EPS = 0.000000000000000001
for ip in range(self.__row):
b.append(matrix1.__values[ip][ip])
d.append(matrix1.__values[ip][ip])
z.append(0.0)
for i in range(1, 51):
sm = 0.0
for ip in range(self.__row - 1):
for iq in range(ip + 1, self.__row):
sm += abs(matrix1.__values[ip][iq])
if sm == 0.0:
break
if i < 4:
threshold = 0.2 * sm / (self.__row ** 2)
else:
threshold = 0.0
for ip in range(self.__row - 1):
for iq in range(ip + 1, self.__row):
g = 100.0 * abs(matrix1.__values[ip][iq])
if i > 4 and g <= EPS * abs(d[ip]) and g <= EPS * abs(d[iq]):
matrix1.__values[ip][iq] = 0.0
else:
if abs(matrix1.__values[ip][iq]) > threshold:
h = d[iq] - d[ip]
if g <= EPS * abs(h):
t = matrix1.__values[ip][iq] / h
else:
theta = 0.5 * h / matrix1.__values[ip][iq]
t = 1.0 / (abs(theta) + math.sqrt(1.0 + theta ** 2))
if theta < 0.0:
t = -t
c = 1.0 / math.sqrt(1 + t ** 2)
s = t * c
tau = s / (1.0 + c)
h = t * matrix1.__values[ip][iq]
z[ip] -= h
z[iq] += h
d[ip] -= h
d[iq] += h
matrix1.__values[ip][iq] = 0.0
for j in range(ip):
matrix1.__rotate(s, tau, j, ip, j, iq)
for j in range(ip + 1, iq):
matrix1.__rotate(s, tau, ip, j, j, iq)
for j in range(iq + 1, self.__row):
matrix1.__rotate(s, tau, ip, j, iq, j)
for j in range(self.__row):
v.__rotate(s, tau, j, ip, j, iq)
for ip in range(self.__row):
b[ip] = b[ip] + z[ip]
d[ip] = b[ip]
z[ip] = 0.0
result = []
for i in range(self.__row):
if d[i] > 0:
result.append(Eigenvector(d[i], v.getColumnVector(i)))
result.sort(key=lambda eigenvector: eigenvector.eigenvalue, reverse=True)
return result
def __repr__(self):
return f"{self.__values}" | PypiClean |
/Flask-WebSub-0.4.tar.gz/Flask-WebSub-0.4/flask_websub/hub/__init__.py | import functools
import itertools
from .blueprint import build_blueprint, A_DAY
from .tasks import make_request_retrying, send_change_notification, \
subscribe, unsubscribe
from .storage import SQLite3HubStorage
__all__ = ('Hub', 'SQLite3HubStorage')
class Hub:
"""This is the API to the hub package. The constructor requires a storage
object, and also accepts a couple of optional configuration values (the
defaults are shown as well):
- BACKOFF_BASE=8.0: When a hub URL cannot be reached, exponential backoff
is used to control retrying. This parameter scales the whole process.
Lowering it means trying more frequently, but also for a shorter time.
      Raising it means the reverse.
- MAX_ATTEMPTS=10: The amount of attempts the retrying process makes.
- PUBLISH_SUPPORTED=False: makes it possible to do a POST request to the
hub endpoint with mode=publish. This is nice for testing, but as it does
no input validation, you should not leave this enabled in production.
- SIGNATURE_ALGORITHM='sha512': The algorithm to sign a content
notification body with. Other possible values are sha1, sha256 and
sha384.
- REQUEST_TIMEOUT=3: Specifies how long to wait before considering a
request to have failed.
- HUB_MIN_LEASE_SECONDS: The minimal lease_seconds value the hub will
accept
- HUB_DEFAULT_LEASE_SECONDS: The lease_seconds value the hub will use if
the subscriber does not have a preference
- HUB_MAX_LEASE_SECONDS: The maximum lease_seconds value the hub will
accept
You can pass in a celery object too, or do that later using init_celery. It
is required to do so before actually using the hub, though.
User-facing properties have doc strings. Other properties should be
considered implementation details.
"""
counter = itertools.count()
def __init__(self, storage, celery=None, **config):
self.validators = []
self.storage = storage
self.config = config
if celery:
self.init_celery(celery)
def endpoint_hook(self):
"""Override this method to hook into the endpoint handling. Anything
this method returns will be forwarded to validation functions when
subscribing.
"""
def build_blueprint(hub, url_prefix=''):
"""Build a blueprint containing a Flask route that is the hub endpoint.
"""
return build_blueprint(hub, url_prefix)
def init_celery(self, celery):
"""Registers the celery tasks on the hub object."""
count = next(self.counter)
def task_with_hub(f, **opts):
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(self, *args, **kwargs)
            # Make sure newer instances don't override older ones.
wrapper.__name__ = wrapper.__name__ + '_' + str(count)
return celery.task(**opts)(wrapper)
# tasks for internal use:
self.subscribe = task_with_hub(subscribe)
self.unsubscribe = task_with_hub(unsubscribe)
max_attempts = self.config.get('MAX_ATTEMPTS', 10)
make_req = task_with_hub(make_request_retrying, bind=True,
max_retries=max_attempts)
self.make_request_retrying = make_req
# user facing tasks
# wrapped by send_change_notification:
self.send_change = task_with_hub(send_change_notification)
# wrapped by cleanup_expired_subscriptions
@task_with_hub
def cleanup(hub):
self.storage.cleanup_expired_subscriptions()
self.cleanup = cleanup
# wrapped by schedule_cleanup
def schedule(every_x_seconds=A_DAY):
celery.add_periodic_task(every_x_seconds,
self.cleanup_expired_subscriptions.s())
self.schedule = schedule
@property
def send_change_notification(self):
"""Allows you to notify subscribers of a change to a `topic_url`. This
is a celery task, so you probably will actually want to call
hub.send_change_notification.delay(topic_url, updated_content). The
last argument is optional. If passed in, it should be an object with
two properties: `headers` (dict-like), and `content` (a base64-encoded
string). If left out, the updated content will be fetched from the
topic url directly.
"""
return self.send_change
@property
def cleanup_expired_subscriptions(self):
"""Removes any expired subscriptions from the backing data store.
It takes no arguments, and is a celery task.
"""
return self.cleanup
@property
def schedule_cleanup(self):
"""schedule_cleanup(every_x_seconds=A_DAY): schedules the celery
task `cleanup_expired_subscriptions` as a recurring event, the
frequency of which is determined by its parameter. This is not a
celery task itself (as the cleanup is only scheduled), and is a
convenience function.
"""
return self.schedule
def register_validator(self, f):
"""Register `f` as a validation function for subscription requests. It
gets a callback_url and topic_url as its arguments, and should return
None if the validation succeeded, or a string describing the problem
otherwise.
"""
self.validators.append(f) | PypiClean |
/FamcyDev-0.3.71-py3-none-any.whl/Famcy/node_modules/bower/packages/bower-config/CHANGELOG.md | # Changelog
## 1.4.3
- Prevent errors when expanded env variable does not exist
## 1.4.2
- Update minimist to 0.2.1 to fix security issue
## 1.4.0
- Change default shorthand resolver from git:// to https://
## 1.3.1
- Ignore hook scripts for environment variable expansion
## 1.3.0 - 2015-12-07
- Allow the use of environment variables in .bowerrc. Fixes [#41](https://github.com/bower/config/issues/41)
- Loads the .bowerrc file from the cwd specified on the command line. Fixes [bower/bower#1993](https://github.com/bower/bower/issues/1993)
- Allwow for array notation in ENV variables [#44](https://github.com/bower/config/issues/44)
## 1.2.3 - 2015-11-27
- Restores env variables if they are undefined at the beginning
- Handles default setting for config.ca. Together with [bower/bower PR #1972](https://github.com/bower/bower/pull/1972), fixes downloading with `strict-ssl` using custom CA
- Displays an error message if .bowerrc is a directory instead of file. Fixes [bower/bower#2022](https://github.com/bower/bower/issues/2022)
## 1.2.2 - 2015-10-16
- Fixes registry configuration expanding [bower/bower#1950](https://github.com/bower/bower/issues/1950)
## 1.2.1 - 2015-10-15
- Fixes case-insensitivity HTTP_PROXY setting issue on Windows
## 1.2.0 - 2015-09-28
- Prevent defaulting cwd to process.cwd()
## 1.1.2 - 2015-09-27
- Performs only camel case normalisation before merging
## 1.1.1 - 2015-09-27
- Fix: Merge extra options after camel-case normalisation, instead of before it
## 1.1.0 - 2015-09-27
- Allow for overwriting options with .load(overwrites) / .read(cwd, overwrites)
## 1.0.1 - 2015-09-27
- Update dependencies and relax "mout" version range
- Most significant changes:
- graceful-fs updated from 2.x version to 4.x
- osenv updated to from 0.0.x to 0.1.x, [tmp location changed](https://github.com/npm/osenv/commit/d6eddbc026538b09026b1dbd60fbc081a8c67e03)
## 1.0.0 - 2015-09-27
- Support for no-proxy configuration variable
- Overwrite HTTP_PROXY, HTTPS_PROXY, and NO_PROXY env variables in load method
- Normalise paths to certificates with contents of them, [#28](https://github.com/bower/config/pull/28)
## 0.6.1 - 2015-04-1
- Fixes merging .bowerrc files upward directory tree. [#25](https://github.com/bower/config/issues/25)
## 0.6.0 - 2015-03-30
- Merge .bowerrc files upward directory tree (fixes [bower/bower#1689](https://github.com/bower/bower/issues/1689)) [#24](https://github.com/bower/config/pull/24)
- Allow NPM config variables (resolves [bower/bower#1711](https://github.com/bower/bower/issues/1711)) [#23](https://github.com/bower/config/pull/23)
## 0.5.2 - 2014-06-09
- Fixes downloading of bower modules with ignores when .bowerrc is overridden with a relative tmp path. [#17](https://github.com/bower/config/issues/17) [bower/bower#1299](https://github.com/bower/bower/issues/1299)
## 0.5.1 - 2014-05-21
- [perf] Uses the same mout version as bower
- [perf] Uses only relevant parts of mout. Related [bower/bower#1134](https://github.com/bower/bower/pull/1134)
## 0.5.0 - 2013-08-30
- Adds a DEFAULT_REGISTRY key to the Config class that exposes the bower registry URL. [#6](https://github.com/bower/config/issues/6)
## 0.4.5 - 2013-08-28
- Fixes crashing when home is not set
## 0.4.4 - 2013-08-21
- Supports nested environment variables [#8](https://github.com/bower/config/issues/8)
## 0.4.3 - 2013-08-19
- Improvement in argv.config parsing
## 0.4.2 - 2013-08-18
- Sets interactive to auto
## 0.4.1 - 2013-08-18
- Generates a fake user instead of using 'unknown'
## 0.4.0 - 2013-08-16
- Suffixes temp folder with the user and 'bower'
## 0.3.5 - 2013-08-14
- Casts buffer to string
## 0.3.4 - 2013-08-11
- Empty .bowerrc files no longer throw an error.
## 0.3.3 - 2013-08-11
- Changes git folder to empty (was not being used anyway)
## 0.3.2 - 2013-08-07
- Uses a known user agent by default when using a proxy.
## 0.3.1 - 2013-08-06
- Fixes Typo
## 0.3.0 - 2013-08-06
- Appends the username when using the temporary folder.
| PypiClean |
/AaioAPI-1.0.2.tar.gz/AaioAPI-1.0.2/README.md | <h1><img src="https://aaio.io/assets/landing/img/logo-m.svg" width=30 height=30> AAIO</h1>
A library for easy work with the [Aaio API](https://wiki.aaio.io/) in the Python programming language.
## What is available in this library?
- Creating a bill for payment
- Quick check of payment status
- Get balance
- The largest number of payment methods
## Installation
Required [Python](https://www.python.org/) version: 3.7 or higher
```cmd
pip install AaioAPI
```
## Usage
To get started, you need to register and get all the necessary store data [via this link on the official AAIO website](https://aaio.io/cabinet/merchants/)
### Get balance
To access your balance, copy your [API key](https://aaio.io/cabinet/api/)
``` python
import AaioAPI
client = 'your_api_key'
balance = AaioAPI.get_balance(client)
balance = balance['balance']
# balance = {
# "type": "success",
# "balance": 50.43, // Текущий доступный баланс
# "referral": 0, // Текущий реферальный баланс
# "hold": 1.57 // Текущий замороженный баланс
# }
print(balance)
```
### Example of creating an invoice and receiving a payment link
Here you will need your store's details
``` python
from AaioAPI import Aaio
import AaioAPI, time
payment = Aaio()
merchant_id = 'your_shop_id' # Shop ID
amount = 25 # Amount to pay
currency = 'RUB' # Order currency
secret = 'your_secret_key' # Secret key No. 1 from the shop settings
desc = 'Test payment.' # Order description
url_aaio = AaioAPI.pay(merchant_id, amount, currency, secret, desc)
print(url_aaio) # Payment link
```
### Example of a status check
Check the payment status every 5 seconds using a loop
```python
while True:
AaioAPI.check_payment(url_aaio, payment)
    if payment.is_expired(): # If the invoice has expired
        print("Invoice was expired")
        break
    elif payment.is_success(): # If the payment was successful
        print("Payment was successful")
        break
    else: # Or if the invoice is awaiting payment
print("Invoice wasn't paid. Please pay the bill")
time.sleep(5)
```
### Full Code
```python
from AaioAPI import Aaio
import AaioAPI, time
payment = Aaio()
merchant_id = 'your_shop_id' # Shop ID
amount = 25 # Amount to pay
currency = 'RUB' # Order currency
secret = 'your_secret_key' # Secret key No. 1 from the shop settings
desc = 'Test payment.' # Order description
url_aaio = AaioAPI.pay(merchant_id, amount, currency, secret, desc)
print(url_aaio) # Payment link
while True:
AaioAPI.check_payment(url_aaio, payment)
    if payment.is_expired(): # If the invoice has expired
        print("Invoice was expired")
        break
    elif payment.is_success(): # If the payment was successful
        print("Payment was successful")
        break
    else: # Or if the invoice is awaiting payment
print("Invoice wasn't paid. Please pay the bill")
time.sleep(5)
```
## License
MIT | PypiClean |
/Flask%20of%20Cinema-1.0.0.tar.gz/Flask of Cinema-1.0.0/static/js/toasts.js | (function($, anim) {
'use strict';
let _defaults = {
html: '',
displayLength: 4000,
inDuration: 300,
outDuration: 375,
classes: '',
completeCallback: null,
activationPercent: 0.8
};
class Toast {
constructor(options) {
/**
* Options for the toast
* @member Toast#options
*/
this.options = $.extend({}, Toast.defaults, options);
this.message = this.options.html;
/**
* Describes current pan state toast
* @type {Boolean}
*/
this.panning = false;
/**
* Time remaining until toast is removed
*/
this.timeRemaining = this.options.displayLength;
if (Toast._toasts.length === 0) {
Toast._createContainer();
}
// Create new toast
Toast._toasts.push(this);
let toastElement = this._createToast();
toastElement.M_Toast = this;
this.el = toastElement;
this.$el = $(toastElement);
this._animateIn();
this._setTimer();
}
static get defaults() {
return _defaults;
}
/**
* Get Instance
*/
static getInstance(el) {
let domElem = !!el.jquery ? el[0] : el;
return domElem.M_Toast;
}
/**
* Append toast container and add event handlers
*/
static _createContainer() {
let container = document.createElement('div');
container.setAttribute('id', 'toast-container');
// Add event handler
container.addEventListener('touchstart', Toast._onDragStart);
container.addEventListener('touchmove', Toast._onDragMove);
container.addEventListener('touchend', Toast._onDragEnd);
container.addEventListener('mousedown', Toast._onDragStart);
document.addEventListener('mousemove', Toast._onDragMove);
document.addEventListener('mouseup', Toast._onDragEnd);
document.body.appendChild(container);
Toast._container = container;
}
/**
* Remove toast container and event handlers
*/
static _removeContainer() {
// Add event handler
document.removeEventListener('mousemove', Toast._onDragMove);
document.removeEventListener('mouseup', Toast._onDragEnd);
$(Toast._container).remove();
Toast._container = null;
}
/**
* Begin drag handler
* @param {Event} e
*/
static _onDragStart(e) {
if (e.target && $(e.target).closest('.toast').length) {
let $toast = $(e.target).closest('.toast');
let toast = $toast[0].M_Toast;
toast.panning = true;
Toast._draggedToast = toast;
toast.el.classList.add('panning');
toast.el.style.transition = '';
toast.startingXPos = Toast._xPos(e);
toast.time = Date.now();
toast.xPos = Toast._xPos(e);
}
}
/**
* Drag move handler
* @param {Event} e
*/
static _onDragMove(e) {
if (!!Toast._draggedToast) {
e.preventDefault();
let toast = Toast._draggedToast;
toast.deltaX = Math.abs(toast.xPos - Toast._xPos(e));
toast.xPos = Toast._xPos(e);
toast.velocityX = toast.deltaX / (Date.now() - toast.time);
toast.time = Date.now();
let totalDeltaX = toast.xPos - toast.startingXPos;
let activationDistance = toast.el.offsetWidth * toast.options.activationPercent;
toast.el.style.transform = `translateX(${totalDeltaX}px)`;
toast.el.style.opacity = 1 - Math.abs(totalDeltaX / activationDistance);
}
}
/**
* End drag handler
*/
static _onDragEnd() {
if (!!Toast._draggedToast) {
let toast = Toast._draggedToast;
toast.panning = false;
toast.el.classList.remove('panning');
let totalDeltaX = toast.xPos - toast.startingXPos;
let activationDistance = toast.el.offsetWidth * toast.options.activationPercent;
let shouldBeDismissed = Math.abs(totalDeltaX) > activationDistance || toast.velocityX > 1;
// Remove toast
if (shouldBeDismissed) {
toast.wasSwiped = true;
toast.dismiss();
// Animate toast back to original position
} else {
toast.el.style.transition = 'transform .2s, opacity .2s';
toast.el.style.transform = '';
toast.el.style.opacity = '';
}
Toast._draggedToast = null;
}
}
/**
* Get x position of mouse or touch event
* @param {Event} e
*/
static _xPos(e) {
if (e.targetTouches && e.targetTouches.length >= 1) {
return e.targetTouches[0].clientX;
}
// mouse event
return e.clientX;
}
/**
* Remove all toasts
*/
static dismissAll() {
for (let toastIndex in Toast._toasts) {
Toast._toasts[toastIndex].dismiss();
}
}
/**
* Create toast and append it to toast container
*/
_createToast() {
let toast = document.createElement('div');
toast.classList.add('toast');
// Add custom classes onto toast
if (!!this.options.classes.length) {
$(toast).addClass(this.options.classes);
}
// Set content
if (
typeof HTMLElement === 'object'
? this.message instanceof HTMLElement
: this.message &&
typeof this.message === 'object' &&
this.message !== null &&
this.message.nodeType === 1 &&
typeof this.message.nodeName === 'string'
) {
toast.appendChild(this.message);
// Check if it is jQuery object
} else if (!!this.message.jquery) {
$(toast).append(this.message[0]);
// Insert as html;
} else {
toast.innerHTML = this.message;
}
        // Append toast
Toast._container.appendChild(toast);
return toast;
}
/**
* Animate in toast
*/
_animateIn() {
// Animate toast in
anim({
targets: this.el,
top: 0,
opacity: 1,
duration: this.options.inDuration,
easing: 'easeOutCubic'
});
}
/**
     * Create setInterval which automatically removes the toast once
     * timeRemaining reaches 0
*/
_setTimer() {
if (this.timeRemaining !== Infinity) {
this.counterInterval = setInterval(() => {
// If toast is not being dragged, decrease its time remaining
if (!this.panning) {
this.timeRemaining -= 20;
}
// Animate toast out
if (this.timeRemaining <= 0) {
this.dismiss();
}
}, 20);
}
}
/**
* Dismiss toast with animation
*/
dismiss() {
window.clearInterval(this.counterInterval);
let activationDistance = this.el.offsetWidth * this.options.activationPercent;
if (this.wasSwiped) {
this.el.style.transition = 'transform .05s, opacity .05s';
this.el.style.transform = `translateX(${activationDistance}px)`;
this.el.style.opacity = 0;
}
anim({
targets: this.el,
opacity: 0,
marginTop: -40,
duration: this.options.outDuration,
easing: 'easeOutExpo',
complete: () => {
// Call the optional callback
if (typeof this.options.completeCallback === 'function') {
this.options.completeCallback();
}
// Remove toast from DOM
this.$el.remove();
Toast._toasts.splice(Toast._toasts.indexOf(this), 1);
if (Toast._toasts.length === 0) {
Toast._removeContainer();
}
}
});
}
}
/**
* @static
* @memberof Toast
* @type {Array.<Toast>}
*/
Toast._toasts = [];
/**
* @static
* @memberof Toast
*/
Toast._container = null;
/**
* @static
* @memberof Toast
* @type {Toast}
*/
Toast._draggedToast = null;
M.Toast = Toast;
M.toast = function(options) {
return new Toast(options);
};
})(cash, M.anime); | PypiClean |
/BifacialSimu-1.2.0-py3-none-any.whl/BifacialSimu_src/Vendor/bifacial_radiance/spectral_utils.py | import numpy as np
import pandas as pd
from collections.abc import Iterable
import os
from scipy import integrate
class spectral_property(object):
"""
    Container for a spectral quantity: a set of values indexed by wavelength (nm),
    with optional linear/nearest/lower/upper interpolation between samples.
"""
def load_file(filepath):
with open(filepath, 'r') as infile:
meta = next(infile)[:-1]
data = pd.read_csv(infile)
return spectral_property(data['value'], data['wavelength'],
interpolation=meta.split(':')[1])
def to_nm(wavelength, units):
unit_conversion = { 'nm': 1,
'um': 1000 }
# Verify units are in conversion table
if units not in unit_conversion:
print("Warning: Unknown unit specified. Options are {}.".format(
unit_conversion.keys()))
units = 'nm'
return wavelength * unit_conversion[units]
def _linear_interpolation(self, wavelength_nm):
# Find upper and lower index
upper_bound = self.data[self.data.index > wavelength_nm].index.min()
lower_bound = self.data[self.data.index < wavelength_nm].index.max()
# Determine values of surrounding indices
upper_val = self.data['value'][upper_bound]
lower_val = self.data['value'][lower_bound]
# Calculate deltas
delta_lambda = upper_bound - lower_bound
delta_val = upper_val - lower_val
return lower_val + delta_val*(wavelength_nm - lower_bound)/delta_lambda
def _nearest_interpolation(self, wavelength_nm):
# Find upper and lower index
upper_bound = self.data[self.data.index > wavelength_nm].index.min()
lower_bound = self.data[self.data.index < wavelength_nm].index.max()
# Determine which index is closer
if (upper_bound - wavelength_nm) < (wavelength_nm - lower_bound):
return self.data['value'][upper_bound]
return self.data['value'][lower_bound]
def _lower_interpolation(self, wavelength_nm):
# Find lower index
lower_bound = self.data[self.data.index < wavelength_nm].index.max()
return self.data['value'][lower_bound]
def _upper_interpolation(self, wavelength_nm):
# Find upper index
upper_bound = self.data[self.data.index > wavelength_nm].index.min()
return self.data['value'][upper_bound]
interpolation_methods = {
'linear': _linear_interpolation,
'nearest': _nearest_interpolation,
'lower': _lower_interpolation,
'upper': _upper_interpolation
}
def __init__(self, values, index, index_units='nm', interpolation=None):
# Verify lengths match
if len(values) != len(index):
print("Warning: Length of values and index must match.")
return
# Convert inputs to list
values = [ val for val in values ]
index = [ spectral_property.to_nm(idx, index_units) for idx in index ]
# Create DataFrame
self.data = pd.DataFrame()
self.data['value'] = values
self.data['wavelength'] = index
self.data = self.data.set_index('wavelength')
self.interpolation = None
if interpolation in spectral_property.interpolation_methods:
self.interpolation = \
spectral_property.interpolation_methods[interpolation]
self.interpolation_type = interpolation
elif interpolation:
print("Warning: Specified interpolation type unknown.")
def _get_single(self, wavelength, units):
# Convert wavelength to nm
wavelength = spectral_property.to_nm(wavelength, units)
if wavelength in self.data.index:
# If the value for that wavelength is known, return it
return self.data['value'][wavelength]
elif self.interpolation:
# Check wavelength is within range
if wavelength < self.data.index.min() or \
wavelength > self.data.index.max():
print("Warning: Requested wavelength outside spectrum.")
return None
# Return interpolated value
return self.interpolation(self, wavelength)
return None
def __getitem__(self, wavelength, units='nm'):
if isinstance(wavelength, Iterable):
return np.array([ self._get_single(wl, units) for wl in wavelength ])
return self._get_single(wavelength, units)
def to_file(self, filepath, append=False):
mode = 'w'
if append:
mode = 'a'
with open(filepath, mode) as outfile:
outfile.write(f"interpolation:{self.interpolation_type}\n")
self.data.to_csv(outfile)
def range(self):
# Find upper and lower index
upper_bound = self.data.index.max()
lower_bound = self.data.index.min()
return (lower_bound, upper_bound)
def scale_values(self, scaling_factor):
self.data['value'] *= scaling_factor
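# Illustrative usage sketch of spectral_property (the sample values are made up):
#     alb = spectral_property([0.20, 0.25, 0.30], [300, 1000, 2500],
#                             index_units='nm', interpolation='linear')
#     alb[650]       # value linearly interpolated at 650 nm
#     alb.range()    # (lower, upper) wavelength bounds in nm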
def spectral_albedo_smarts(zen, azm, material, min_wavelength=300,
max_wavelength=4000):
import pySMARTS
smarts_res = pySMARTS.SMARTSSpectraZenAzm('30 31', str(zen), str(azm), material,
min_wvl=str(min_wavelength),
max_wvl=str(max_wavelength))
return spectral_property(smarts_res['Zonal_ground_reflectance'],
smarts_res['Wvlgth'], interpolation='linear')
def spectral_irradiance_smarts(zen, azm, material='LiteSoil', min_wavelength=300,
max_wavelength=4000):
import pySMARTS
smarts_res = pySMARTS.SMARTSSpectraZenAzm('2 3 4', str(zen), str(azm),
material=material,
min_wvl=str(min_wavelength),
max_wvl=str(max_wavelength))
dni_spectrum = spectral_property(smarts_res['Direct_normal_irradiance'],
smarts_res['Wvlgth'], interpolation='linear')
dhi_spectrum = spectral_property(smarts_res['Difuse_horizn_irradiance'],
smarts_res['Wvlgth'], interpolation='linear')
ghi_spectrum = spectral_property(smarts_res['Global_horizn_irradiance'],
smarts_res['Wvlgth'], interpolation='linear')
return (dni_spectrum, dhi_spectrum, ghi_spectrum)
def spectral_irradiance_smarts_SRRL(YEAR, MONTH, DAY, HOUR, ZONE,
LATIT, LONGIT, ALTIT,
RH, TAIR, SEASON, TDAY, SPR, W,
TILT, WAZIM, HEIGHT,
ALPHA1, ALPHA2, OMEGL, GG, BETA, TAU5,
RHOG, material,
IOUT='2 3 4', min_wvl='280', max_wvl='4000'):
import pySMARTS
smarts_res = pySMARTS.SMARTSSRRL(IOUT=IOUT, YEAR=YEAR,MONTH=MONTH,DAY=DAY,HOUR=HOUR, ZONE=ZONE,
LATIT=LATIT, LONGIT=LONGIT, ALTIT=ALTIT,
RH=RH, TAIR=TAIR, SEASON=SEASON, TDAY=TDAY, SPR=SPR, W=W,
TILT=TILT, WAZIM=WAZIM, HEIGHT=HEIGHT,
ALPHA1 = ALPHA1, ALPHA2 = ALPHA2, OMEGL = OMEGL,
GG = GG, BETA = BETA, TAU5= TAU5,
RHOG=RHOG, material=material,
min_wvl=min_wvl, max_wvl=max_wvl)
dni_spectrum = spectral_property(smarts_res[smarts_res.keys()[1]],
smarts_res['Wvlgth'], interpolation='linear')
dhi_spectrum = spectral_property(smarts_res[smarts_res.keys()[2]],
smarts_res['Wvlgth'], interpolation='linear')
ghi_spectrum = spectral_property(smarts_res[smarts_res.keys()[3]],
smarts_res['Wvlgth'], interpolation='linear')
return (dni_spectrum, dhi_spectrum, ghi_spectrum)
def spectral_albedo_smarts_SRRL(YEAR, MONTH, DAY, HOUR, ZONE,
LATIT, LONGIT, ALTIT,
RH, TAIR, SEASON, TDAY, SPR, W,
TILT, WAZIM, HEIGHT,
ALPHA1, ALPHA2, OMEGL, GG, BETA, TAU5,
RHOG, material,
IOUT='30 31', min_wvl='280', max_wvl='4000'):
import pySMARTS
smarts_res = pySMARTS.SMARTSSRRL(IOUT=IOUT, YEAR=YEAR,MONTH=MONTH,DAY=DAY,HOUR=HOUR, ZONE=ZONE,
LATIT=LATIT, LONGIT=LONGIT, ALTIT=ALTIT,
RH=RH, TAIR=TAIR, SEASON=SEASON, TDAY=TDAY, SPR=SPR, W=W,
TILT=TILT, WAZIM=WAZIM, HEIGHT=HEIGHT,
ALPHA1 = ALPHA1, ALPHA2 = ALPHA2, OMEGL = OMEGL,
GG = GG, BETA = BETA, TAU5= TAU5,
RHOG=RHOG, material=material,
min_wvl=min_wvl, max_wvl=max_wvl)
return spectral_property(smarts_res['Zonal_ground_reflectance'],
smarts_res['Wvlgth'], interpolation='linear')
def generate_spectra(idx, metdata, material=None, spectra_folder=None, scale_spectra=False,
scale_albedo=False, scale_albedo_nonspectral_sim=False):
"""
generate spectral curve for particular material. Requires pySMARTS
Parameters
----------
idx : int
index of the metdata file to run pySMARTS.
metdata : bifacial_radiance MetObj
DESCRIPTION.
material : string, optional
type of material for spectral simulation. Options include: Grass,
Gravel, etc. The default is None.
spectra_folder : path, optional
location to save spectral data. The default is None.
scale_spectra : bool, optional
        If True, scale each simulated irradiance spectrum so its integral
        matches the measured DNI/DHI/GHI value. The default is False.
    scale_albedo : bool, optional
        If True, scale the spectral albedo to be consistent with the measured
        broadband albedo. The default is False.
    scale_albedo_nonspectral_sim : bool, optional
        If True, scale the spectral albedo so its GHI-weighted average matches
        the measured albedo. The default is False.
Returns
-------
spectral_alb : spectral_property class
spectral_alb.data: dataframe with frequency and magnitude data.
spectral_dni : spectral_property class
spectral_dni.data: dataframe with frequency and magnitude data.
spectral_dhi : spectral_property class
spectral_dhi.data: dataframe with frequency and magnitude data.
"""
if material is None:
material = 'Gravel'
# Extract data from metdata
dni = metdata.dni[idx]
dhi = metdata.dhi[idx]
ghi = metdata.ghi[idx]
try:
alb = metdata.albedo[idx]
except TypeError:
raise Exception("Error - No 'metdata.albedo' value passed.")
solpos = metdata.solpos.iloc[idx]
zen = float(solpos.zenith)
azm = float(solpos.azimuth) - 180
#lat = metdata.latitude
#lon = metdata.longitude
#elev = metdata.elevation / 1000
#t = metdata.datetime[idx]
# Verify sun up
if zen > 90:
print("Sun below horizon. Skipping.")
return None
# Define file suffix
# -- CHANGE --
suffix = f'_{idx:04}.txt'
# Generate/Load dni and dhi
dni_file = os.path.join(spectra_folder, "dni"+suffix)
dhi_file = os.path.join(spectra_folder, "dhi"+suffix)
ghi_file = os.path.join(spectra_folder, "ghi"+suffix)
spectral_dni, spectral_dhi, spectral_ghi = spectral_irradiance_smarts(zen, azm, min_wavelength=300)
# SCALING:
# If specifed, scale the irradiance spectra based on their respective
# measured value.
if scale_spectra:
dni_scale = dni / spectral_dni.data.apply(lambda g: integrate.trapz(spectral_dni.data.value, x=spectral_dni.data.index))
dhi_scale = dhi / spectral_dhi.data.apply(lambda g: integrate.trapz(spectral_dhi.data.value, x=spectral_dhi.data.index))
ghi_scale = ghi / spectral_ghi.data.apply(lambda g: integrate.trapz(spectral_ghi.data.value, x=spectral_ghi.data.index))
# dni_scale = dni / (10*np.sum(spectral_dni[range(280, 4000, 10)]))
# dhi_scale = dhi / (10*np.sum(spectral_dhi[range(280, 4000, 10)]))
# ghi_scale = ghi / (10*np.sum(spectral_ghi[range(280, 2501, 10)]))
spectral_dni.scale_values(dni_scale.value)
spectral_dhi.scale_values(dhi_scale.value)
spectral_ghi.scale_values(ghi_scale.value)
# Write irradiance spectra
#'''
spectral_dni.to_file(dni_file)
spectral_dhi.to_file(dhi_file)
spectral_ghi.to_file(ghi_file)
#'''
# Generate/Load albedo
alb_file = os.path.join(spectra_folder, "alb"+suffix)
if material == 'Seasonal':
MONTH = metdata.datetime[idx].month
if 4 <= MONTH <= 7:
material = 'Grass'
else:
material = 'DryGrass'
spectral_alb = spectral_albedo_smarts(zen, azm, material, min_wavelength=300)
# If specifed, scale the spectral albedo to have a mean value matching the
# measured albedo.
if scale_albedo:
# option A
denom = spectral_alb.data.value * spectral_ghi.data.value
# option B
#denom = spectral_alb.data
# TODO:
# Add test to
if alb > 1 or alb == 0:
print("albedo measured is an incorrect number, not scaling albedo generated")
else:
alb_scale = alb / denom.apply(lambda g: integrate.trapz(denom.values, x=spectral_alb.data.index))
spectral_alb.scale_values(alb_scale.values)
if scale_albedo_nonspectral_sim:
sim_alb = np.sum(spectral_alb[range(280, 2501, 10)] * spectral_ghi[range(280, 2501, 10)])/np.sum(spectral_ghi[range(280, 2501, 10)])
if alb > 1:
print("albedo measured is an incorrect number, not scaling albedo generated")
else:
alb_scale = alb / sim_alb
spectral_alb.scale_values(alb_scale)
print(alb, sim_alb, alb_scale)
# Write albedo file
spectral_alb.to_file(alb_file)
return (spectral_alb, spectral_dni, spectral_dhi) | PypiClean |
/CsuPTMD-1.0.12.tar.gz/CsuPTMD-1.0.12/PTMD/maskrcnn_benchmark/apex/apex/amp/opt.py | import contextlib
import warnings
from .scaler import LossScaler, master_params
from ._amp_state import maybe_print
import numpy as np
class OptimWrapper(object):
def __init__(self, optimizer, amp_handle, num_loss):
self._optimizer = optimizer
self._amp_handle = amp_handle
self._num_loss = num_loss
self._loss_idx = 0
self._skip_next = [False] * num_loss
self._loss_scaler = [LossScaler('dynamic') for _ in range(num_loss)]
@contextlib.contextmanager
def scale_loss(self, loss):
if not self._amp_handle.is_active():
yield loss
return
# When there are multiple losses per-optimizer, we need
# to save out current grad accumulation, since we won't be
        # able to unscale this particular loss once the grads are
# all mixed together.
cached_grads = []
if self._loss_idx > 0:
for p in master_params(self._optimizer):
if p.grad is not None:
cached_grads.append(p.grad.data.detach().clone())
else:
cached_grads.append(None)
self._optimizer.zero_grad()
loss_scale = self._cur_loss_scaler().loss_scale()
yield loss * loss_scale
self._cur_loss_scaler().clear_overflow_state()
self._cur_loss_scaler().unscale(
master_params(self._optimizer),
master_params(self._optimizer),
loss_scale)
self._skip_next[self._loss_idx] = self._cur_loss_scaler().update_scale()
self._loss_idx += 1
if len(cached_grads) > 0:
for p, cached_grad in zip(master_params(self._optimizer),
cached_grads):
if cached_grad is not None:
p.grad.data.add_(cached_grad)
cached_grads = []
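    # A minimal usage sketch for scale_loss (illustrative; assumes this wrapper was
    # obtained from an amp handle, e.g. optimizer = amp_handle.wrap_optimizer(...)):
    #
    #     with optimizer.scale_loss(loss) as scaled_loss:
    #         scaled_loss.backward()
    #     optimizer.step()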
def _cur_loss_scaler(self):
assert 0 <= self._loss_idx < self._num_loss
return self._loss_scaler[self._loss_idx]
def step(self, closure=None):
if not self._amp_handle.is_active():
return self._optimizer.step(closure=closure)
self._loss_idx = 0
for group in self._optimizer.param_groups:
for p in group['params']:
self._amp_handle.remove_cache(p)
if closure is not None:
raise NotImplementedError(
'The `closure` argument is unsupported by the amp ' +
'optimizer wrapper.')
if any(self._skip_next):
maybe_print('Gradient overflow, skipping update')
self._skip_next = [False] * self._num_loss
else:
return self._optimizer.step(closure=closure)
# Forward any attribute lookups
def __getattr__(self, attr):
return getattr(self._optimizer, attr)
# Forward all torch.optim.Optimizer methods
def __getstate__(self):
return self._optimizer.__getstate__()
def __setstate__(self):
return self._optimizer.__setstate__()
def __repr__(self):
return self._optimizer.__repr__()
def state_dict(self):
return self._optimizer.state_dict()
def load_state_dict(self, state_dict):
return self._optimizer.load_state_dict(state_dict)
def zero_grad(self):
return self._optimizer.zero_grad()
def add_param_group(self, param_group):
return self._optimizer.add_param_group(param_group) | PypiClean |
/IPFX-1.0.8.tar.gz/IPFX-1.0.8/ipfx/x_to_nwb/conversion_utils.py | import math
from pkg_resources import get_distribution, DistributionNotFound
import os
from subprocess import Popen, PIPE
import numpy as np
from pynwb.icephys import CurrentClampStimulusSeries, VoltageClampStimulusSeries, CurrentClampSeries, \
VoltageClampSeries, IZeroClampSeries
try:
from pynwb.form.backends.hdf5.h5_utils import H5DataIO
except ModuleNotFoundError:
from hdmf.backends.hdf5.h5_utils import H5DataIO
PLACEHOLDER = "PLACEHOLDER"
V_CLAMP_MODE = 0
I_CLAMP_MODE = 1
I0_CLAMP_MODE = 2
# TODO Use the pint package if doing that manually gets too involved
def parseUnit(unitString):
"""
Split a SI unit string with prefix into the base unit and the prefix (as number).
"""
if unitString == "pA":
return 1e-12, "A"
elif unitString == "nA":
return 1e-9, "A"
elif unitString == "A":
return 1.0, "A"
elif unitString == "mV":
return 1e-3, "V"
elif unitString == "V":
return 1.0, "V"
else:
raise ValueError(f"Unsupported unit string {unitString}.")
def getStimulusSeriesClass(clampMode):
"""
Return the appropriate pynwb stimulus class for the given clamp mode.
"""
if clampMode == V_CLAMP_MODE:
return VoltageClampStimulusSeries
elif clampMode == I_CLAMP_MODE:
return CurrentClampStimulusSeries
elif clampMode == I0_CLAMP_MODE:
return None
else:
raise ValueError(f"Unsupported clamp mode {clampMode}.")
def getAcquiredSeriesClass(clampMode):
"""
Return the appropriate pynwb acquisition class for the given clamp mode.
"""
if clampMode == V_CLAMP_MODE:
return VoltageClampSeries
elif clampMode == I_CLAMP_MODE:
return CurrentClampSeries
elif clampMode == I0_CLAMP_MODE:
return IZeroClampSeries
else:
raise ValueError(f"Unsupported clamp mode {clampMode}.")
def createSeriesName(prefix, number, total):
"""
Format a unique series group name of the form `prefix_XXX` where `XXX` is
the formatted `number` long enough for `total` number of groups.
"""
return f"{prefix}_{number:0{math.ceil(math.log(total, 10))}d}", number + 1
def createCycleID(numbers, total):
"""
Create an integer from all numbers which is unique for that combination.
    :param numbers:
        Iterable holding non-negative integer numbers
    :param total:
Total number of TimeSeries written to the NWB file
"""
assert total > 0, f"Unexpected value for total {total}"
places = max(math.ceil(math.log(total, 10)), 1)
result = 0
for idx, n in enumerate(reversed(numbers)):
assert n >= 0, f"Unexpected value {n} at index {idx}"
assert n < 10**places, f"Unexpected value {n} which is larger than {total}"
result += n * (10**(idx * places))
return result
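# Worked example (illustrative): with total=100 each number gets two decimal
# places, so createCycleID([2, 14], 100) packs the numbers into the integer 214.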
def convertDataset(array, compression):
"""
Convert to FP32 and optionally request compression for the given array and return it wrapped.
"""
data = array.astype(np.float32)
if compression:
return H5DataIO(data=data, compression=True, chunks=True, shuffle=True, fletcher32=True)
return data
def getPackageInfo():
"""
Return a dictionary with version information for the allensdk package
"""
def get_git_version():
"""
Returns the project version as derived by git.
"""
path = os.path.dirname(__file__)
branch = Popen(f'git -C "{path}" rev-parse --abbrev-ref HEAD', stdout=PIPE,
shell=True).stdout.read().rstrip().decode('ascii')
rev = Popen(f'git -C "{path}" describe --always --tags', stdout=PIPE,
shell=True).stdout.read().rstrip().decode('ascii')
if branch.startswith('fatal') or rev.startswith('fatal'):
raise ValueError("Could not determine git version")
return f"({branch}) {rev}"
try:
package_version = get_distribution('allensdk').version
except DistributionNotFound: # not installed as a package
package_version = None
try:
git_version = get_git_version()
    except ValueError: # not in a git repository
git_version = None
version_info = {"repo": "https://github.com/AllenInstitute/ipfx",
"package_version": "Unknown",
"git_revision": "Unknown"}
if package_version:
version_info["package_version"] = package_version
if git_version:
version_info["git_revision"] = git_version
return version_info
def getStimulusRecordIndex(sweep):
return sweep.StimCount - 1
def getChannelRecordIndex(pgf, sweep, trace):
"""
Given a pgf node, a SweepRecord and TraceRecord this returns the
corresponding `ChannelRecordStimulus` node as index.
"""
stimRec = pgf[getStimulusRecordIndex(sweep)]
for idx, channelRec in enumerate(stimRec):
if channelRec.AdcChannel == trace.AdcChannel:
return idx
return None
def clampModeToString(clampMode):
"""
Return the given clamp mode as human readable string. Useful for error
messages.
"""
if clampMode == I_CLAMP_MODE:
return "I_CLAMP_MODE"
elif clampMode == V_CLAMP_MODE:
return "V_CLAMP_MODE"
elif clampMode == I0_CLAMP_MODE:
return "I0_CLAMP_MODE"
else:
raise ValueError(f"Unknown clampMode {clampMode}") | PypiClean |
/NlvWxPython-4.2.0-cp37-cp37m-win_amd64.whl/wx/lib/editor/editor.py |
import os
import time
import wx
from . import selection
from . import images
#----------------------------
def ForceBetween(min, val, max):
if val > max:
return max
if val < min:
return min
return val
def LineTrimmer(lineOfText):
if len(lineOfText) == 0:
return ""
elif lineOfText[-1] == '\r':
return lineOfText[:-1]
else:
return lineOfText
def LineSplitter(text):
    return map(LineTrimmer, text.split('\n'))
#----------------------------
class Scroller:
def __init__(self, parent):
self.parent = parent
self.ow = 0
self.oh = 0
self.ox = 0
self.oy = 0
def SetScrollbars(self, fw, fh, w, h, x, y):
if (self.ow != w or self.oh != h or self.ox != x or self.oy != y):
self.parent.SetScrollbars(fw, fh, w, h, x, y)
self.ow = w
self.oh = h
self.ox = x
self.oy = y
#----------------------------------------------------------------------
class Editor(wx.ScrolledWindow):
def __init__(self, parent, id,
pos=wx.DefaultPosition, size=wx.DefaultSize, style=0):
wx.ScrolledWindow.__init__(self, parent, id,
pos, size,
style|wx.WANTS_CHARS)
self.isDrawing = False
self.InitCoords()
self.InitFonts()
self.SetColors()
self.MapEvents()
self.LoadImages()
self.InitDoubleBuffering()
self.InitScrolling()
self.SelectOff()
self.SetFocus()
self.SetText([""])
self.SpacesPerTab = 4
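    # Minimal usage sketch (illustrative; the frame setup and ids are arbitrary,
    # not part of any documented API):
    #
    #   import wx
    #   from wx.lib.editor.editor import Editor
    #
    #   app = wx.App(False)
    #   frame = wx.Frame(None, title="Editor demo")
    #   ed = Editor(frame, -1)
    #   ed.SetText(["Hello", "world"])
    #   frame.Show()
    #   app.MainLoop()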
##------------------ Init stuff
def InitCoords(self):
self.cx = 0
self.cy = 0
self.oldCx = 0
self.oldCy = 0
self.sx = 0
self.sy = 0
self.sw = 0
self.sh = 0
self.sco_x = 0
self.sco_y = 0
def MapEvents(self):
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self.Bind(wx.EVT_MOTION, self.OnMotion)
self.Bind(wx.EVT_SCROLLWIN, self.OnScroll)
self.Bind(wx.EVT_CHAR, self.OnChar)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy)
self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
##------------------- Platform-specific stuff
def NiceFontForPlatform(self):
if wx.Platform == "__WXMSW__":
font = wx.Font(10, wx.FONTFAMILY_MODERN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
else:
font = wx.Font(12, wx.FONTFAMILY_MODERN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False)
return font
def UnixKeyHack(self, key):
#
# this will be obsolete when we get the new wxWindows patch
#
# 12/14/03 - jmg
#
# Which patch? I don't know if this is needed, but I don't know
# why it's here either. Play it safe; leave it in.
#
if key <= 26:
key += ord('a') - 1
return key
##-------------------- UpdateView/Cursor code
def OnSize(self, event):
self.AdjustScrollbars()
self.SetFocus()
def SetCharDimensions(self):
# TODO: We need a code review on this. It appears that Linux
# improperly reports window dimensions when the scrollbar's there.
self.bw, self.bh = self.GetClientSize()
if wx.Platform == "__WXMSW__":
self.sh = int(self.bh / self.fh)
self.sw = int(self.bw / self.fw) - 1
else:
self.sh = int(self.bh / self.fh)
if self.LinesInFile() >= self.sh:
self.bw = self.bw - wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_X)
self.sw = int(self.bw / self.fw) - 1
self.sw = int(self.bw / self.fw) - 1
if self.CalcMaxLineLen() >= self.sw:
self.bh = self.bh - wx.SystemSettings.GetMetric(wx.SYS_HSCROLL_Y)
self.sh = int(self.bh / self.fh)
def UpdateView(self, dc = None):
if dc is None:
dc = wx.ClientDC(self)
if dc.IsOk():
self.SetCharDimensions()
self.KeepCursorOnScreen()
self.DrawSimpleCursor(0,0, dc, True)
self.Draw(dc)
def OnPaint(self, event):
dc = wx.PaintDC(self)
if self.isDrawing:
return
self.isDrawing = True
self.UpdateView(dc)
wx.CallAfter(self.AdjustScrollbars)
self.isDrawing = False
def OnEraseBackground(self, evt):
pass
##-------------------- Drawing code
def InitFonts(self):
dc = wx.ClientDC(self)
self.font = self.NiceFontForPlatform()
dc.SetFont(self.font)
self.fw = dc.GetCharWidth()
self.fh = dc.GetCharHeight()
def SetColors(self):
self.fgColor = wx.BLACK
self.bgColor = wx.WHITE
self.selectColor = wx.Colour(238, 220, 120) # r, g, b = emacsOrange
def InitDoubleBuffering(self):
pass
def DrawEditText(self, t, x, y, dc):
dc.DrawText(t, x * self.fw, y * self.fh)
def DrawLine(self, line, dc):
if self.IsLine(line):
l = line
t = self.lines[l]
dc.SetTextForeground(self.fgColor)
fragments = selection.Selection(
self.SelectBegin, self.SelectEnd,
self.sx, self.sw, line, t)
x = 0
for (data, selected) in fragments:
if selected:
dc.SetTextBackground(self.selectColor)
if x == 0 and len(data) == 0 and len(fragments) == 1:
data = ' '
else:
dc.SetTextBackground(self.bgColor)
self.DrawEditText(data, x, line - self.sy, dc)
x += len(data)
def Draw(self, odc=None):
if not odc:
odc = wx.ClientDC(self)
dc = wx.BufferedDC(odc)
if dc.IsOk():
dc.SetFont(self.font)
dc.SetBackgroundMode(wx.SOLID)
dc.SetTextBackground(self.bgColor)
dc.SetTextForeground(self.fgColor)
dc.SetBackground(wx.Brush(self.bgColor))
dc.Clear()
for line in range(self.sy, self.sy + self.sh):
self.DrawLine(line, dc)
if len(self.lines) < self.sh + self.sy:
self.DrawEofMarker(dc)
self.DrawCursor(dc)
##------------------ eofMarker stuff
def LoadImages(self):
self.eofMarker = images.EofImage.GetBitmap()
def DrawEofMarker(self,dc):
x = 0
y = (len(self.lines) - self.sy) * self.fh
hasTransparency = 1
dc.DrawBitmap(self.eofMarker, x, y, hasTransparency)
##------------------ cursor-related functions
def DrawCursor(self, dc = None):
if not dc:
dc = wx.ClientDC(self)
if (self.LinesInFile())<self.cy: #-1 ?
self.cy = self.LinesInFile()-1
s = self.lines[self.cy]
x = self.cx - self.sx
y = self.cy - self.sy
self.DrawSimpleCursor(x, y, dc)
def DrawSimpleCursor(self, xp, yp, dc = None, old=False):
if not dc:
dc = wx.ClientDC(self)
if old:
xp = self.sco_x
yp = self.sco_y
szx = self.fw
szy = self.fh
x = xp * szx
y = yp * szy
dc.Blit(x,y, szx,szy, dc, x,y, wx.XOR)
self.sco_x = xp
self.sco_y = yp
##-------- Enforcing screen boundaries, cursor movement
def CalcMaxLineLen(self):
"""get length of longest line on screen"""
maxlen = 0
for line in self.lines[self.sy:self.sy+self.sh]:
if len(line) >maxlen:
maxlen = len(line)
return maxlen
def KeepCursorOnScreen(self):
self.sy = ForceBetween(max(0, self.cy-self.sh), self.sy, self.cy)
self.sx = ForceBetween(max(0, self.cx-self.sw), self.sx, self.cx)
self.AdjustScrollbars()
def HorizBoundaries(self):
self.SetCharDimensions()
maxLineLen = self.CalcMaxLineLen()
self.sx = ForceBetween(0, self.sx, max(self.sw, maxLineLen - self.sw + 1))
self.cx = ForceBetween(self.sx, self.cx, self.sx + self.sw - 1)
def VertBoundaries(self):
self.SetCharDimensions()
self.sy = ForceBetween(0, self.sy, max(self.sh, self.LinesInFile() - self.sh + 1))
self.cy = ForceBetween(self.sy, self.cy, self.sy + self.sh - 1)
def cVert(self, num):
self.cy = self.cy + num
self.cy = ForceBetween(0, self.cy, self.LinesInFile() - 1)
self.sy = ForceBetween(self.cy - self.sh + 1, self.sy, self.cy)
self.cx = min(self.cx, self.CurrentLineLength())
def cHoriz(self, num):
self.cx = self.cx + num
self.cx = ForceBetween(0, self.cx, self.CurrentLineLength())
self.sx = ForceBetween(self.cx - self.sw + 1, self.sx, self.cx)
def AboveScreen(self, row):
return row < self.sy
def BelowScreen(self, row):
return row >= self.sy + self.sh
def LeftOfScreen(self, col):
return col < self.sx
def RightOfScreen(self, col):
return col >= self.sx + self.sw
##----------------- data structure helper functions
def GetText(self):
return self.lines
def SetText(self, lines):
self.InitCoords()
self.lines = lines
self.UnTouchBuffer()
self.SelectOff()
self.AdjustScrollbars()
self.UpdateView(None)
def IsLine(self, lineNum):
return (0<=lineNum) and (lineNum<self.LinesInFile())
def GetTextLine(self, lineNum):
if self.IsLine(lineNum):
return self.lines[lineNum]
return ""
def SetTextLine(self, lineNum, text):
if self.IsLine(lineNum):
self.lines[lineNum] = text
def CurrentLineLength(self):
return len(self.lines[self.cy])
def LinesInFile(self):
return len(self.lines)
def UnTouchBuffer(self):
self.bufferTouched = False
def BufferWasTouched(self):
return self.bufferTouched
def TouchBuffer(self):
self.bufferTouched = True
##-------------------------- Mouse scroll timing functions
def InitScrolling(self):
# we don't rely on the windows system to scroll for us; we just
# redraw the screen manually every time
self.EnableScrolling(False, False)
self.nextScrollTime = 0
self.SCROLLDELAY = 0.050 # seconds
self.scrollTimer = wx.Timer(self)
self.scroller = Scroller(self)
def CanScroll(self):
if time.time() > self.nextScrollTime:
self.nextScrollTime = time.time() + self.SCROLLDELAY
return True
else:
return False
def SetScrollTimer(self):
oneShot = True
self.scrollTimer.Start(1000*self.SCROLLDELAY/2, oneShot)
self.Bind(wx.EVT_TIMER, self.OnTimer)
def OnTimer(self, event):
screenX, screenY = wx.GetMousePosition()
x, y = self.ScreenToClient((screenX, screenY))
self.MouseToRow(y)
self.MouseToCol(x)
self.SelectUpdate()
##-------------------------- Mouse off screen functions
def HandleAboveScreen(self, row):
self.SetScrollTimer()
if self.CanScroll():
row = self.sy - 1
row = max(0, row)
self.cy = row
def HandleBelowScreen(self, row):
self.SetScrollTimer()
if self.CanScroll():
row = self.sy + self.sh
row = min(row, self.LinesInFile() - 1)
self.cy = row
def HandleLeftOfScreen(self, col):
self.SetScrollTimer()
if self.CanScroll():
col = self.sx - 1
col = max(0,col)
self.cx = col
def HandleRightOfScreen(self, col):
self.SetScrollTimer()
if self.CanScroll():
col = self.sx + self.sw
col = min(col, self.CurrentLineLength())
self.cx = col
##------------------------ mousing functions
def MouseToRow(self, mouseY):
row = self.sy + int(mouseY / self.fh)
if self.AboveScreen(row):
self.HandleAboveScreen(row)
elif self.BelowScreen(row):
self.HandleBelowScreen(row)
else:
self.cy = min(row, self.LinesInFile() - 1)
def MouseToCol(self, mouseX):
col = self.sx + int(mouseX / self.fw)
if self.LeftOfScreen(col):
self.HandleLeftOfScreen(col)
elif self.RightOfScreen(col):
self.HandleRightOfScreen(col)
else:
self.cx = min(col, self.CurrentLineLength())
def MouseToCursor(self, event):
self.MouseToRow(event.GetY())
self.MouseToCol(event.GetX())
def OnMotion(self, event):
if event.LeftIsDown() and self.HasCapture():
self.Selecting = True
self.MouseToCursor(event)
self.SelectUpdate()
def OnLeftDown(self, event):
self.MouseToCursor(event)
self.SelectBegin = (self.cy, self.cx)
self.SelectEnd = None
self.UpdateView()
self.CaptureMouse()
self.SetFocus()
def OnLeftUp(self, event):
if not self.HasCapture():
return
if self.SelectEnd is None:
self.OnClick()
else:
self.Selecting = False
self.SelectNotify(False, self.SelectBegin, self.SelectEnd)
self.ReleaseMouse()
self.scrollTimer.Stop()
#------------------------- Scrolling
def HorizScroll(self, event, eventType):
maxLineLen = self.CalcMaxLineLen()
if eventType == wx.wxEVT_SCROLLWIN_LINEUP:
self.sx -= 1
elif eventType == wx.wxEVT_SCROLLWIN_LINEDOWN:
self.sx += 1
elif eventType == wx.wxEVT_SCROLLWIN_PAGEUP:
self.sx -= self.sw
elif eventType == wx.wxEVT_SCROLLWIN_PAGEDOWN:
self.sx += self.sw
elif eventType == wx.wxEVT_SCROLLWIN_TOP:
self.sx = self.cx = 0
elif eventType == wx.wxEVT_SCROLLWIN_BOTTOM:
self.sx = maxLineLen - self.sw
self.cx = maxLineLen
else:
self.sx = event.GetPosition()
self.HorizBoundaries()
def VertScroll(self, event, eventType):
if eventType == wx.wxEVT_SCROLLWIN_LINEUP:
self.sy -= 1
elif eventType == wx.wxEVT_SCROLLWIN_LINEDOWN:
self.sy += 1
elif eventType == wx.wxEVT_SCROLLWIN_PAGEUP:
self.sy -= self.sh
elif eventType == wx.wxEVT_SCROLLWIN_PAGEDOWN:
self.sy += self.sh
elif eventType == wx.wxEVT_SCROLLWIN_TOP:
self.sy = self.cy = 0
elif eventType == wx.wxEVT_SCROLLWIN_BOTTOM:
self.sy = self.LinesInFile() - self.sh
self.cy = self.LinesInFile()
else:
self.sy = event.GetPosition()
self.VertBoundaries()
def OnScroll(self, event):
dir = event.GetOrientation()
eventType = event.GetEventType()
if dir == wx.HORIZONTAL:
self.HorizScroll(event, eventType)
else:
self.VertScroll(event, eventType)
self.UpdateView()
def AdjustScrollbars(self):
if self:
for i in range(2):
self.SetCharDimensions()
self.scroller.SetScrollbars(
self.fw, self.fh,
self.CalcMaxLineLen()+3, max(self.LinesInFile()+1, self.sh),
self.sx, self.sy)
#------------ backspace, delete, return
def BreakLine(self, event):
if self.IsLine(self.cy):
t = self.lines[self.cy]
self.lines = self.lines[:self.cy] + [t[:self.cx],t[self.cx:]] + self.lines[self.cy+1:]
self.cVert(1)
self.cx = 0
self.TouchBuffer()
def InsertChar(self,char):
if self.IsLine(self.cy):
t = self.lines[self.cy]
t = t[:self.cx] + char + t[self.cx:]
self.SetTextLine(self.cy, t)
self.cHoriz(1)
self.TouchBuffer()
def JoinLines(self):
t1 = self.lines[self.cy]
t2 = self.lines[self.cy+1]
self.cx = len(t1)
self.lines = self.lines[:self.cy] + [t1 + t2] + self.lines[self.cy+2:]
self.TouchBuffer()
def DeleteChar(self,x,y,oldtext):
newtext = oldtext[:x] + oldtext[x+1:]
self.SetTextLine(y, newtext)
self.TouchBuffer()
def BackSpace(self, event):
t = self.GetTextLine(self.cy)
if self.cx>0:
self.DeleteChar(self.cx-1,self.cy,t)
self.cHoriz(-1)
self.TouchBuffer()
elif self.cx == 0:
if self.cy > 0:
self.cy -= 1
self.JoinLines()
self.TouchBuffer()
else:
wx.Bell()
def Delete(self, event):
t = self.GetTextLine(self.cy)
if self.cx<len(t):
self.DeleteChar(self.cx,self.cy,t)
self.TouchBuffer()
else:
if self.cy < len(self.lines) - 1:
self.JoinLines()
self.TouchBuffer()
def Escape(self, event):
self.SelectOff()
def TabKey(self, event):
numSpaces = self.SpacesPerTab - (self.cx % self.SpacesPerTab)
self.SingleLineInsert(' ' * numSpaces)
##----------- selection routines
def SelectUpdate(self):
self.SelectEnd = (self.cy, self.cx)
self.SelectNotify(self.Selecting, self.SelectBegin, self.SelectEnd)
self.UpdateView()
def NormalizedSelect(self):
(begin, end) = (self.SelectBegin, self.SelectEnd)
(bRow, bCol) = begin
(eRow, eCol) = end
if (bRow < eRow):
return (begin, end)
elif (eRow < bRow):
return (end, begin)
else:
if (bCol < eCol):
return (begin, end)
else:
return (end, begin)
def FindSelection(self):
if self.SelectEnd is None or self.SelectBegin is None:
wx.Bell()
return None
(begin, end) = self.NormalizedSelect()
(bRow, bCol) = begin
(eRow, eCol) = end
return (bRow, bCol, eRow, eCol)
def SelectOff(self):
self.SelectBegin = None
self.SelectEnd = None
self.Selecting = False
self.SelectNotify(False,None,None)
def CopySelection(self, event):
selection = self.FindSelection()
if selection is None:
return
(bRow, bCol, eRow, eCol) = selection
if bRow == eRow:
self.SingleLineCopy(bRow, bCol, eCol)
else:
self.MultipleLineCopy(bRow, bCol, eRow, eCol)
def OnCopySelection(self, event):
self.CopySelection(event)
self.SelectOff()
def CopyToClipboard(self, linesOfText):
do = wx.TextDataObject()
do.SetText(os.linesep.join(linesOfText))
wx.TheClipboard.Open()
wx.TheClipboard.SetData(do)
wx.TheClipboard.Close()
def SingleLineCopy(self, Row, bCol, eCol):
Line = self.GetTextLine(Row)
self.CopyToClipboard([Line[bCol:eCol]])
def MultipleLineCopy(self, bRow, bCol, eRow, eCol):
bLine = self.GetTextLine(bRow)[bCol:]
eLine = self.GetTextLine(eRow)[:eCol]
self.CopyToClipboard([bLine] + [l for l in self.lines[bRow + 1:eRow]] + [eLine])
def OnDeleteSelection(self, event):
selection = self.FindSelection()
if selection is None:
return
(bRow, bCol, eRow, eCol) = selection
if bRow == eRow:
self.SingleLineDelete(bRow, bCol, eCol)
else:
self.MultipleLineDelete(bRow, bCol, eRow, eCol)
self.TouchBuffer()
self.cy = bRow
self.cx = bCol
self.SelectOff()
self.UpdateView()
def SingleLineDelete(self, Row, bCol, eCol):
ModLine = self.GetTextLine(Row)
ModLine = ModLine[:bCol] + ModLine[eCol:]
self.SetTextLine(Row,ModLine)
def MultipleLineDelete(self, bRow, bCol, eRow, eCol):
bLine = self.GetTextLine(bRow)
eLine = self.GetTextLine(eRow)
ModLine = bLine[:bCol] + eLine[eCol:]
self.lines[bRow:eRow + 1] = [ModLine]
def OnPaste(self, event):
do = wx.TextDataObject()
wx.TheClipboard.Open()
success = wx.TheClipboard.GetData(do)
wx.TheClipboard.Close()
if success:
pastedLines = LineSplitter(do.GetText())
else:
wx.Bell()
return
if len(pastedLines) == 0:
wx.Bell()
return
elif len(pastedLines) == 1:
self.SingleLineInsert(pastedLines[0])
else:
self.MultipleLinePaste(pastedLines)
def SingleLineInsert(self, newText):
ModLine = self.GetTextLine(self.cy)
ModLine = ModLine[:self.cx] + newText + ModLine[self.cx:]
self.SetTextLine(self.cy, ModLine)
self.cHoriz(len(newText))
self.TouchBuffer()
self.UpdateView()
def MultipleLinePaste(self, pastedLines):
FirstLine = LastLine = self.GetTextLine(self.cy)
FirstLine = FirstLine[:self.cx] + pastedLines[0]
LastLine = pastedLines[-1] + LastLine[self.cx:]
NewSlice = [FirstLine]
NewSlice += [l for l in pastedLines[1:-1]]
NewSlice += [LastLine]
self.lines[self.cy:self.cy + 1] = NewSlice
self.cy = self.cy + len(pastedLines)-1
self.cx = len(pastedLines[-1])
self.TouchBuffer()
self.UpdateView()
def OnCutSelection(self,event):
self.CopySelection(event)
self.OnDeleteSelection(event)
#-------------- Keyboard movement implementations
def MoveDown(self, event):
self.cVert(+1)
def MoveUp(self, event):
self.cVert(-1)
def MoveLeft(self, event):
if self.cx == 0:
if self.cy == 0:
wx.Bell()
else:
self.cVert(-1)
self.cx = self.CurrentLineLength()
else:
self.cx -= 1
def MoveRight(self, event):
linelen = self.CurrentLineLength()
if self.cx == linelen:
if self.cy == len(self.lines) - 1:
wx.Bell()
else:
self.cx = 0
self.cVert(1)
else:
self.cx += 1
def MovePageDown(self, event):
self.cVert(self.sh)
def MovePageUp(self, event):
self.cVert(-self.sh)
def MoveHome(self, event):
self.cx = 0
def MoveEnd(self, event):
self.cx = self.CurrentLineLength()
def MoveStartOfFile(self, event):
self.cy = 0
self.cx = 0
def MoveEndOfFile(self, event):
self.cy = len(self.lines) - 1
self.cx = self.CurrentLineLength()
#-------------- Key handler mapping tables
def SetMoveSpecialFuncs(self, action):
action[wx.WXK_DOWN] = self.MoveDown
action[wx.WXK_UP] = self.MoveUp
action[wx.WXK_LEFT] = self.MoveLeft
action[wx.WXK_RIGHT] = self.MoveRight
action[wx.WXK_PAGEDOWN] = self.MovePageDown
action[wx.WXK_PAGEUP] = self.MovePageUp
action[wx.WXK_HOME] = self.MoveHome
action[wx.WXK_END] = self.MoveEnd
def SetMoveSpecialControlFuncs(self, action):
action[wx.WXK_HOME] = self.MoveStartOfFile
action[wx.WXK_END] = self.MoveEndOfFile
def SetAltFuncs(self, action):
# subclass implements
pass
def SetControlFuncs(self, action):
action['c'] = self.OnCopySelection
action['d'] = self.OnDeleteSelection
action['v'] = self.OnPaste
action['x'] = self.OnCutSelection
def SetSpecialControlFuncs(self, action):
action[wx.WXK_INSERT] = self.OnCopySelection
def SetShiftFuncs(self, action):
action[wx.WXK_DELETE] = self.OnCutSelection
action[wx.WXK_INSERT] = self.OnPaste
def SetSpecialFuncs(self, action):
action[wx.WXK_BACK] = self.BackSpace
action[wx.WXK_DELETE] = self.Delete
action[wx.WXK_RETURN] = self.BreakLine
action[wx.WXK_ESCAPE] = self.Escape
action[wx.WXK_TAB] = self.TabKey
##-------------- Logic for key handlers
def Move(self, keySettingFunction, key, event):
action = {}
keySettingFunction(action)
if not key in action:
return False
if event.ShiftDown():
if not self.Selecting:
self.Selecting = True
self.SelectBegin = (self.cy, self.cx)
action[key](event)
self.SelectEnd = (self.cy, self.cx)
else:
action[key](event)
if self.Selecting:
self.Selecting = False
self.SelectNotify(self.Selecting, self.SelectBegin, self.SelectEnd)
self.UpdateView()
return True
def MoveSpecialKey(self, event, key):
return self.Move(self.SetMoveSpecialFuncs, key, event)
def MoveSpecialControlKey(self, event, key):
if not event.ControlDown():
return False
return self.Move(self.SetMoveSpecialControlFuncs, key, event)
def Dispatch(self, keySettingFunction, key, event):
action = {}
keySettingFunction(action)
if key in action:
action[key](event)
self.UpdateView()
return True
return False
def ModifierKey(self, key, event, modifierKeyDown, MappingFunc):
if not modifierKeyDown:
return False
key = self.UnixKeyHack(key)
try:
key = chr(key)
except Exception:
return False
if not self.Dispatch(MappingFunc, key, event):
wx.Bell()
return True
def ControlKey(self, event, key):
return self.ModifierKey(key, event, event.ControlDown(), self.SetControlFuncs)
def AltKey(self, event, key):
return self.ModifierKey(key, event, event.AltDown(), self.SetAltFuncs)
def SpecialControlKey(self, event, key):
if not event.ControlDown():
return False
if not self.Dispatch(self.SetSpecialControlFuncs, key, event):
wx.Bell()
return True
def ShiftKey(self, event, key):
if not event.ShiftDown():
return False
return self.Dispatch(self.SetShiftFuncs, key, event)
def NormalChar(self, event, key):
self.SelectOff()
# regular ascii
if not self.Dispatch(self.SetSpecialFuncs, key, event):
if (key>31) and (key<256):
self.InsertChar(chr(key))
else:
wx.Bell()
return
self.UpdateView()
self.AdjustScrollbars()
def OnChar(self, event):
key = event.GetKeyCode()
filters = [self.AltKey,
self.MoveSpecialControlKey,
self.ControlKey,
self.SpecialControlKey,
self.MoveSpecialKey,
self.ShiftKey,
self.NormalChar]
for filter in filters:
if filter(event,key):
break
return 0
#----------------------- Eliminate memory leaks
def OnDestroy(self, event):
self.mdc = None
self.odc = None
self.bgColor = None
self.fgColor = None
self.font = None
self.selectColor = None
self.scrollTimer = None
self.eofMarker = None
#-------------------- Abstract methods for subclasses
def OnClick(self):
pass
def SelectNotify(self, Selecting, SelectionBegin, SelectionEnd):
pass | PypiClean |
/CSUMMDET-1.0.23.tar.gz/CSUMMDET-1.0.23/mmdet/models/backbones/hrnet.py | import logging
import torch.nn as nn
from mmcv.cnn import constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from ..registry import BACKBONES
from ..utils import build_conv_layer, build_norm_layer
from .resnet import BasicBlock, Bottleneck
class HRModule(nn.Module):
""" High-Resolution Module for HRNet. In this module, every branch
has 4 BasicBlocks/Bottlenecks. Fusion/Exchange is in this module.
"""
def __init__(self,
num_branches,
blocks,
num_blocks,
in_channels,
num_channels,
multiscale_output=True,
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN')):
super(HRModule, self).__init__()
self._check_branches(num_branches, num_blocks, in_channels,
num_channels)
self.in_channels = in_channels
self.num_branches = num_branches
self.multiscale_output = multiscale_output
self.norm_cfg = norm_cfg
self.conv_cfg = conv_cfg
self.with_cp = with_cp
self.branches = self._make_branches(num_branches, blocks, num_blocks,
num_channels)
self.fuse_layers = self._make_fuse_layers()
self.relu = nn.ReLU(inplace=False)
def _check_branches(self, num_branches, num_blocks, in_channels,
num_channels):
if num_branches != len(num_blocks):
error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
num_branches, len(num_blocks))
raise ValueError(error_msg)
if num_branches != len(num_channels):
error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
num_branches, len(num_channels))
raise ValueError(error_msg)
if num_branches != len(in_channels):
error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
num_branches, len(in_channels))
raise ValueError(error_msg)
def _make_one_branch(self,
branch_index,
block,
num_blocks,
num_channels,
stride=1):
downsample = None
if stride != 1 or \
self.in_channels[branch_index] != \
num_channels[branch_index] * block.expansion:
downsample = nn.Sequential(
build_conv_layer(
self.conv_cfg,
self.in_channels[branch_index],
num_channels[branch_index] * block.expansion,
kernel_size=1,
stride=stride,
bias=False),
build_norm_layer(self.norm_cfg, num_channels[branch_index] *
block.expansion)[1])
layers = []
layers.append(
block(
self.in_channels[branch_index],
num_channels[branch_index],
stride,
downsample=downsample,
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg))
self.in_channels[branch_index] = \
num_channels[branch_index] * block.expansion
for i in range(1, num_blocks[branch_index]):
layers.append(
block(
self.in_channels[branch_index],
num_channels[branch_index],
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg))
return nn.Sequential(*layers)
def _make_branches(self, num_branches, block, num_blocks, num_channels):
branches = []
for i in range(num_branches):
branches.append(
self._make_one_branch(i, block, num_blocks, num_channels))
return nn.ModuleList(branches)
def _make_fuse_layers(self):
if self.num_branches == 1:
return None
num_branches = self.num_branches
in_channels = self.in_channels
fuse_layers = []
num_out_branches = num_branches if self.multiscale_output else 1
for i in range(num_out_branches):
fuse_layer = []
for j in range(num_branches):
if j > i:
fuse_layer.append(
nn.Sequential(
build_conv_layer(
self.conv_cfg,
in_channels[j],
in_channels[i],
kernel_size=1,
stride=1,
padding=0,
bias=False),
build_norm_layer(self.norm_cfg, in_channels[i])[1],
nn.Upsample(
scale_factor=2**(j - i), mode='nearest')))
elif j == i:
fuse_layer.append(None)
else:
conv_downsamples = []
for k in range(i - j):
if k == i - j - 1:
conv_downsamples.append(
nn.Sequential(
build_conv_layer(
self.conv_cfg,
in_channels[j],
in_channels[i],
kernel_size=3,
stride=2,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg,
in_channels[i])[1]))
else:
conv_downsamples.append(
nn.Sequential(
build_conv_layer(
self.conv_cfg,
in_channels[j],
in_channels[j],
kernel_size=3,
stride=2,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg,
in_channels[j])[1],
nn.ReLU(inplace=False)))
fuse_layer.append(nn.Sequential(*conv_downsamples))
fuse_layers.append(nn.ModuleList(fuse_layer))
return nn.ModuleList(fuse_layers)
def forward(self, x):
if self.num_branches == 1:
return [self.branches[0](x[0])]
for i in range(self.num_branches):
x[i] = self.branches[i](x[i])
x_fuse = []
for i in range(len(self.fuse_layers)):
y = 0
for j in range(self.num_branches):
if i == j:
y += x[j]
else:
y += self.fuse_layers[i][j](x[j])
x_fuse.append(self.relu(y))
return x_fuse
@BACKBONES.register_module
class HRNet(nn.Module):
"""HRNet backbone.
High-Resolution Representations for Labeling Pixels and Regions
arXiv: https://arxiv.org/abs/1904.04514
Args:
extra (dict): detailed configuration for each stage of HRNet.
conv_cfg (dict): dictionary to construct and config conv layer.
norm_cfg (dict): dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
zero_init_residual (bool): whether to use zero init for last norm layer
in resblocks to let them behave as identity.
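    Example (illustrative HRNet-W32-style config; the values below are an
    assumption chosen for demonstration, not required defaults):
        >>> from mmdet.models.backbones.hrnet import HRNet
        >>> import torch
        >>> extra = dict(
        >>>     stage1=dict(
        >>>         num_modules=1,
        >>>         num_branches=1,
        >>>         block='BOTTLENECK',
        >>>         num_blocks=(4, ),
        >>>         num_channels=(64, )),
        >>>     stage2=dict(
        >>>         num_modules=1,
        >>>         num_branches=2,
        >>>         block='BASIC',
        >>>         num_blocks=(4, 4),
        >>>         num_channels=(32, 64)),
        >>>     stage3=dict(
        >>>         num_modules=4,
        >>>         num_branches=3,
        >>>         block='BASIC',
        >>>         num_blocks=(4, 4, 4),
        >>>         num_channels=(32, 64, 128)),
        >>>     stage4=dict(
        >>>         num_modules=3,
        >>>         num_branches=4,
        >>>         block='BASIC',
        >>>         num_blocks=(4, 4, 4, 4),
        >>>         num_channels=(32, 64, 128, 256)))
        >>> model = HRNet(extra)
        >>> model.init_weights()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> outputs = model(inputs)
        >>> for out in outputs:
        ...     print(tuple(out.shape))
        (1, 32, 8, 8)
        (1, 64, 4, 4)
        (1, 128, 2, 2)
        (1, 256, 1, 1)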
"""
blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck}
def __init__(self,
extra,
conv_cfg=None,
norm_cfg=dict(type='BN'),
norm_eval=True,
with_cp=False,
zero_init_residual=False):
super(HRNet, self).__init__()
self.extra = extra
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.zero_init_residual = zero_init_residual
# stem net
self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2)
self.conv1 = build_conv_layer(
self.conv_cfg,
3,
64,
kernel_size=3,
stride=2,
padding=1,
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
self.conv_cfg,
64,
64,
kernel_size=3,
stride=2,
padding=1,
bias=False)
self.add_module(self.norm2_name, norm2)
self.relu = nn.ReLU(inplace=True)
# stage 1
self.stage1_cfg = self.extra['stage1']
num_channels = self.stage1_cfg['num_channels'][0]
block_type = self.stage1_cfg['block']
num_blocks = self.stage1_cfg['num_blocks'][0]
block = self.blocks_dict[block_type]
stage1_out_channels = num_channels * block.expansion
self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
# stage 2
self.stage2_cfg = self.extra['stage2']
num_channels = self.stage2_cfg['num_channels']
block_type = self.stage2_cfg['block']
block = self.blocks_dict[block_type]
num_channels = [channel * block.expansion for channel in num_channels]
self.transition1 = self._make_transition_layer([stage1_out_channels],
num_channels)
self.stage2, pre_stage_channels = self._make_stage(
self.stage2_cfg, num_channels)
# stage 3
self.stage3_cfg = self.extra['stage3']
num_channels = self.stage3_cfg['num_channels']
block_type = self.stage3_cfg['block']
block = self.blocks_dict[block_type]
num_channels = [channel * block.expansion for channel in num_channels]
self.transition2 = self._make_transition_layer(pre_stage_channels,
num_channels)
self.stage3, pre_stage_channels = self._make_stage(
self.stage3_cfg, num_channels)
# stage 4
self.stage4_cfg = self.extra['stage4']
num_channels = self.stage4_cfg['num_channels']
block_type = self.stage4_cfg['block']
block = self.blocks_dict[block_type]
num_channels = [channel * block.expansion for channel in num_channels]
self.transition3 = self._make_transition_layer(pre_stage_channels,
num_channels)
self.stage4, pre_stage_channels = self._make_stage(
self.stage4_cfg, num_channels)
@property
def norm1(self):
return getattr(self, self.norm1_name)
@property
def norm2(self):
return getattr(self, self.norm2_name)
def _make_transition_layer(self, num_channels_pre_layer,
num_channels_cur_layer):
num_branches_cur = len(num_channels_cur_layer)
num_branches_pre = len(num_channels_pre_layer)
transition_layers = []
for i in range(num_branches_cur):
if i < num_branches_pre:
if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
transition_layers.append(
nn.Sequential(
build_conv_layer(
self.conv_cfg,
num_channels_pre_layer[i],
num_channels_cur_layer[i],
kernel_size=3,
stride=1,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg,
num_channels_cur_layer[i])[1],
nn.ReLU(inplace=True)))
else:
transition_layers.append(None)
else:
conv_downsamples = []
for j in range(i + 1 - num_branches_pre):
in_channels = num_channels_pre_layer[-1]
out_channels = num_channels_cur_layer[i] \
if j == i - num_branches_pre else in_channels
conv_downsamples.append(
nn.Sequential(
build_conv_layer(
self.conv_cfg,
in_channels,
out_channels,
kernel_size=3,
stride=2,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg, out_channels)[1],
nn.ReLU(inplace=True)))
transition_layers.append(nn.Sequential(*conv_downsamples))
return nn.ModuleList(transition_layers)
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
build_conv_layer(
self.conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False),
build_norm_layer(self.norm_cfg, planes * block.expansion)[1])
layers = []
layers.append(
block(
inplanes,
planes,
stride,
downsample=downsample,
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg))
return nn.Sequential(*layers)
def _make_stage(self, layer_config, in_channels, multiscale_output=True):
num_modules = layer_config['num_modules']
num_branches = layer_config['num_branches']
num_blocks = layer_config['num_blocks']
num_channels = layer_config['num_channels']
block = self.blocks_dict[layer_config['block']]
hr_modules = []
for i in range(num_modules):
# multi_scale_output is only used for the last module
if not multiscale_output and i == num_modules - 1:
reset_multiscale_output = False
else:
reset_multiscale_output = True
hr_modules.append(
HRModule(
num_branches,
block,
num_blocks,
in_channels,
num_channels,
reset_multiscale_output,
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg))
return nn.Sequential(*hr_modules), in_channels
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
constant_init(m.norm3, 0)
elif isinstance(m, BasicBlock):
constant_init(m.norm2, 0)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.norm2(x)
x = self.relu(x)
x = self.layer1(x)
x_list = []
for i in range(self.stage2_cfg['num_branches']):
if self.transition1[i] is not None:
x_list.append(self.transition1[i](x))
else:
x_list.append(x)
y_list = self.stage2(x_list)
x_list = []
for i in range(self.stage3_cfg['num_branches']):
if self.transition2[i] is not None:
x_list.append(self.transition2[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage3(x_list)
x_list = []
for i in range(self.stage4_cfg['num_branches']):
if self.transition3[i] is not None:
x_list.append(self.transition3[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage4(x_list)
return y_list
def train(self, mode=True):
super(HRNet, self).train(mode)
if mode and self.norm_eval:
for m in self.modules():
# trick: eval have effect on BatchNorm only
if isinstance(m, _BatchNorm):
m.eval() | PypiClean |
/FireSpark-0.0.26.tar.gz/FireSpark-0.0.26/README.md | FireSpark
=========
FireSpark aims to provide the Magna ML/MAS team with a flexible and standardized library supporting data processing, data management, dataset curation, and ETL-related activities.
A dataset created using FireSpark is stored in [Apache Parquet](https://parquet.apache.org/) format. On top of a Parquet
schema, FireSpark takes advantage of the open-source [Petastorm](https://github.com/uber/petastorm) library to support multidimensional arrays.
**This repo is at an early development stage. Please contact [me]([email protected]) if you have questions, especially about contributing use-case specifications, requirements, or suggestions.** :innocent:
Usage Instructions
------------
If you are not a `FireSpark` developer and would like some quick instructions to get started with `FireSpark`, you can stop here and go to the [FireSpark-Sandbox](https://elc-github.magna.global/Magna-Autonomous-Systems/FireSpark-Sandbox) repository for a more practical usage guide. The [FireSpark-Sandbox](https://elc-github.magna.global/Magna-Autonomous-Systems/FireSpark-Sandbox) repository is maintained on a par with `FireSpark` library development. If you have a new feature or functionality request, please use the repository's `issues` to discuss your idea with us.
For advanced users and developers, please use the following guides:
- [Installation](./docs/installation.md)
- [Protobuf Definitions](./docs/mas_protobuf_def.md)
- [Get Started](./docs/get_started.md)
- [Development Guide](./docs/development.md)
- [Lyftbag Reader](./docs/lyftbag.md)
- [Dataset Stories -- Downtown Dataset](./docs/Brampton_Dataset_Information.md)
## Development and Dataset Processing Logs
### 2020.03.19
The Brampton Downtown dataset has been successfully processed both into the MAS standard database in **protobuf** message format and into ML train/eval **Parquet** file format datasets.
Take a look at:
- MAS standard database: [mas-standard-database](https://s3.console.aws.amazon.com/s3/buckets/mas-standard-database/?region=us-east-1&tab=overview)/[protobuf_database](https://s3.console.aws.amazon.com/s3/buckets/mas-standard-database/protobuf_database/?region=us-east-1&tab=overview)/[Downtown](https://s3.console.aws.amazon.com/s3/#)
- Parquet datalake: [mas-standard-database](https://s3.console.aws.amazon.com/s3/buckets/mas-standard-database/?region=us-east-1&tab=overview)/[parquet_datalake](https://s3.console.aws.amazon.com/s3/buckets/mas-standard-database/parquet_datalake/?region=us-east-1&tab=overview)/[Downtown](https://s3.console.aws.amazon.com/s3/#)
#### Parquet Dataset Example
The resulting parquet files can be loaded on the `PyTorch`, `Python`, and `Tensorflow` platforms.
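As a rough sketch of what this looks like in practice (the S3 path, batch size, and field handling below are illustrative assumptions, not a fixed API), a Petastorm-backed Parquet dataset of this kind can be read straight into PyTorch:

```python
from petastorm import make_reader
from petastorm.pytorch import DataLoader

# Illustrative path -- substitute the real parquet_datalake prefix.
DATASET_URL = "s3://mas-standard-database/parquet_datalake/Downtown"

with make_reader(DATASET_URL) as reader:
    with DataLoader(reader, batch_size=4) as loader:
        for batch in loader:
            # Each batch is a dict keyed by the Unischema field names
            # (e.g. the camera image array and its annotations).
            print({name: getattr(value, "shape", value) for name, value in batch.items()})
            break
```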
Demonstration of Downtown dataset from Parquet files (Front Camera):

#### Dataloader
**PyTorch**: Please refer to `/test/dataloader_torch.py` to see how to load and preprocess examples from the parquet dataset.
**Tensorflow**: Please refer to `/test/dataloader_tf.py` to see how to load and preprocess examples from the parquet dataset.
**FireflyML**: Please refer to `/test/dataloader_python.py` to see how to load and preprocess examples from the parquet dataset. | PypiClean
/AbPyTools-0.3.2.tar.gz/AbPyTools-0.3.2/abpytools/core/chain_collection.py | from .chain import Chain
import numpy as np
import logging
from abpytools.utils import PythonConfig, Download
import json
import os
import pandas as pd
from .helper_functions import numbering_table_sequences, numbering_table_region, numbering_table_multiindex
from operator import itemgetter
from urllib import parse
from math import ceil
from .base import CollectionBase
from ..features.composition import *
from ..analysis.distance_metrics import *
from ..core.cache import Cache
from multiprocessing import Manager, Process
from inspect import signature
from .utils import (json_ChainCollection_formatter, pb2_ChainCollection_formatter, pb2_ChainCollection_parser,
fasta_ChainCollection_parser, json_ChainCollection_parser)
from .flags import *
# setting up debugging messages
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
ipython_config = PythonConfig()
if ipython_config.ipython_info == 'notebook':
from tqdm import tqdm_notebook as tqdm # pragma: no cover
else:
from tqdm import tqdm
if BACKEND_FLAGS.HAS_PROTO:
from abpytools.core.formats import ChainCollectionProto
class ChainCollection(CollectionBase):
"""
    Container of Chain objects used to perform analysis on the ensemble.
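    Example (illustrative; the FASTA file name is a placeholder):
        >>> from abpytools.core.chain_collection import ChainCollection
        >>> chains = ChainCollection.load_from_fasta('heavy_chains.fasta')
        >>> chains.n_ab  # number of successfully loaded sequences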
"""
def __init__(self, antibody_objects=None, load=True, **kwargs):
"""
Args:
antibody_objects:
load:
**kwargs:
"""
if antibody_objects is None:
self.antibody_objects = []
else:
if isinstance(antibody_objects, ChainCollection):
antibody_objects = antibody_objects.antibody_objects
elif not isinstance(antibody_objects, list):
raise ValueError("Expected a list, instead got object of type {}".format(type(antibody_objects)))
elif not all(isinstance(obj, Chain) for obj in antibody_objects) and len(antibody_objects) > 0:
raise ValueError("Expected a list containing objects of type Chain")
self.antibody_objects = antibody_objects
if len(set(x.numbering_scheme for x in antibody_objects)) == 1:
self._numbering_scheme = antibody_objects[0].numbering_scheme
else:
raise ValueError("ChainCollection only support Chain objects with the same numbering scheme.")
if len(set(x.chain for x in antibody_objects)) == 1:
self._chain = antibody_objects[0].chain
elif len(set(x.chain for x in antibody_objects)) == 0:
self._chain = ''
else:
raise ValueError("ChainCollection only support Chain objects with the same chain type.")
if load:
self.load(**kwargs)
def load(self, show_progressbar=True, n_threads=4, verbose=True):
self.antibody_objects, self._chain = load_from_antibody_object(
antibody_objects=self.antibody_objects,
show_progressbar=show_progressbar,
n_threads=n_threads, verbose=verbose)
@classmethod
def load_from_fasta(cls, path, numbering_scheme=NUMBERING_FLAGS.CHOTHIA, n_threads=20,
verbose=True, show_progressbar=True):
if not os.path.isfile(path):
raise ValueError("File does not exist!")
with open(path, 'r') as f:
antibody_objects = fasta_ChainCollection_parser(f, numbering_scheme=numbering_scheme)
chain_collection = cls(antibody_objects=antibody_objects, load=True,
n_threads=n_threads, verbose=verbose,
show_progressbar=show_progressbar)
return chain_collection
@classmethod
def load_from_pb2(cls, path, n_threads=20, verbose=True, show_progressbar=True):
with open(path, 'rb') as f:
proto_parser = ChainCollectionProto()
proto_parser.ParseFromString(f.read())
antibody_objects = pb2_ChainCollection_parser(proto_parser)
chain_collection = cls(antibody_objects=antibody_objects, load=True,
n_threads=n_threads, verbose=verbose,
show_progressbar=show_progressbar)
return chain_collection
@classmethod
def load_from_json(cls, path, n_threads=20, verbose=True, show_progressbar=True):
with open(path, 'r') as f:
data = json.load(f)
antibody_objects = json_ChainCollection_parser(data)
chain_collection = cls(antibody_objects=antibody_objects, load=True,
n_threads=n_threads, verbose=verbose,
show_progressbar=show_progressbar)
return chain_collection
def save_to_json(self, path, update=True):
with open(os.path.join(path + '.json'), 'w') as f:
data = json_ChainCollection_formatter(self.antibody_objects)
json.dump(data, f, indent=2)
def save_to_pb2(self, path, update=True):
proto_parser = ChainCollectionProto()
try:
with open(os.path.join(path + '.pb2'), 'rb') as f:
proto_parser.ParseFromString(f.read())
except IOError:
# print("Creating new file")
pass
pb2_ChainCollection_formatter(self.antibody_objects, proto_parser)
with open(os.path.join(path + '.pb2'), 'wb') as f:
f.write(proto_parser.SerializeToString())
def save_to_fasta(self, path, update=True):
with open(os.path.join(path + '.fasta'), 'w') as f:
f.writelines(make_fasta(self.names, self.sequences))
def molecular_weights(self, monoisotopic=False):
"""
:param monoisotopic: bool whether to use monoisotopic values
:return: list
"""
return [x.ab_molecular_weight(monoisotopic=monoisotopic) for x in self.antibody_objects]
def extinction_coefficients(self, extinction_coefficient_database='Standard', reduced=False):
"""
:param extinction_coefficient_database: string with the name of the database to use
:param reduced: bool whether to consider the cysteines to be reduced
:return: list
"""
return [x.ab_ec(extinction_coefficient_database=extinction_coefficient_database,
reduced=reduced) for x in self.antibody_objects]
def hydrophobicity_matrix(self):
if self._chain == CHAIN_FLAGS.HEAVY_CHAIN:
num_columns = 158
else:
num_columns = 138
abs_hydrophobicity_matrix = np.zeros((len(self.antibody_objects), num_columns))
for row in range(abs_hydrophobicity_matrix.shape[0]):
abs_hydrophobicity_matrix[row] = self.antibody_objects[row].hydrophobicity_matrix
return abs_hydrophobicity_matrix
def get_object(self, name=''):
"""
:param name: str
:return:
"""
if name in self.names:
index = self.names.index(name)
return self[index]
else:
raise ValueError('Could not find sequence with specified name')
def ab_region_index(self):
"""
method to determine index of amino acids in CDR regions
:return: dictionary with names as keys and each value is a dictionary with keys CDR and FR
'CDR' entry contains dictionaries with CDR1, CDR2 and CDR3 regions
'FR' entry contains dictionaries with FR1, FR2, FR3 and FR4 regions
"""
return {x.name: {'CDR': x.ab_regions()[0], 'FR': x.ab_regions()[1]} for x in self.antibody_objects}
def numbering_table(self, as_array=False, region='all'):
region = numbering_table_region(region)
table = np.row_stack(
[x.ab_numbering_table(as_array=True, region=region) for x in self.antibody_objects])
if as_array:
return table
else:
# return the data as a pandas.DataFrame -> it's slower but looks nicer and makes it easier to get
# the data of interest
whole_sequence_dict, whole_sequence = numbering_table_sequences(region, self._numbering_scheme, self._chain)
multi_index = numbering_table_multiindex(region=region,
whole_sequence_dict=whole_sequence_dict)
# create the DataFrame and assign the columns and index names
data = pd.DataFrame(data=table)
data.columns = multi_index
data.index = self.names
return data
def igblast_server_query(self, chunk_size=50, show_progressbar=True, **kwargs):
"""
:param show_progressbar:
:param chunk_size:
:param kwargs: keyword arguments to pass to igblast_options
:return:
"""
# check if query is larger than 50 sequences
# if so split into several queries
query_list = self._split_to_chunks(chunk_size=chunk_size)
n_chunks = ceil(len(self) / chunk_size) - 1
if show_progressbar:
for query in tqdm(query_list, total=n_chunks):
self._igblast_server_query(query, **kwargs)
else:
for query in query_list:
self._igblast_server_query(query, **kwargs)
def _igblast_server_query(self, query, **kwargs):
# prepare raw data
fasta_query = make_fasta(names=query.names, sequences=query.sequences)
# get url with igblast options
url = igblast_options(sequences=fasta_query, **kwargs)
# send and download query
q = Download(url, verbose=False)
try:
q.download()
except ValueError: # pragma: no cover
raise ValueError("Check the internet connection.") # pragma: no cover
igblast_result = q.html
self._parse_igblast_query(igblast_result, query.names)
def igblast_local_query(self, file_path):
# load in file
with open(file_path, 'r') as f:
igblast_result = f.readlines()
self._parse_igblast_query(igblast_result, self.names)
def append(self, antibody_obj):
self.antibody_objects += antibody_obj
def pop(self, index=-1):
if index > len(self):
raise ValueError("The given index is outside the range of the object.")
element_to_pop = self[index]
self._destroy(index=index)
return element_to_pop
def _destroy(self, index):
del self.antibody_objects[index]
# def filter(self):
#
# # TODO: complete method
# pass
#
def set_numbering_scheme(self, numbering_scheme, realign=True):
if realign:
try:
self._numbering_scheme = numbering_scheme
self.antibody_objects, self._chain = load_from_antibody_object(self.antibody_objects)
except:
print("Could not realign sequences, nothing has been changed.")
else:
self._numbering_scheme = numbering_scheme
@property
def names(self):
return [x.name for x in self.antibody_objects]
@property
def sequences(self):
return [x.sequence for x in self.antibody_objects]
@property
def aligned_sequences(self):
return [x.aligned_sequence for x in self.antibody_objects]
@property
def n_ab(self):
return len(self.sequences)
@property
def chain(self):
if self._chain == '':
chains = set([x.chain for x in self.antibody_objects])
if len(chains) == 1:
self._chain = next(iter(chains))
return self._chain
else:
raise ValueError('Different types of chains found in collection!')
else:
return self._chain
@property
def numbering_scheme(self):
return self._numbering_scheme
@property
def charge(self):
return np.array([x.ab_charge() for x in self.antibody_objects])
@property
def total_charge(self):
return {x.name: x.ab_total_charge() for x in self.antibody_objects}
@property
def germline_identity(self):
return {x.name: x.germline_identity for x in self.antibody_objects}
@property
def germline(self):
return {x.name: x.germline for x in self.antibody_objects}
def _string_summary_basic(self):
return "abpytools.ChainCollection Chain type: {}, Number of sequences: {}".format(self._chain,
len(self.antibody_objects))
def __repr__(self):
return "<%s at 0x%02x>" % (self._string_summary_basic(), id(self))
def __len__(self):
return len(self.antibody_objects)
def __getitem__(self, indices):
if isinstance(indices, int):
return self.antibody_objects[indices]
else:
return ChainCollection(antibody_objects=list(itemgetter(*indices)(self.antibody_objects)))
def __add__(self, other):
if isinstance(other, ChainCollection):
if self.numbering_scheme != other.numbering_scheme:
raise ValueError("Concatenation requires ChainCollection "
"objects to use the same numbering scheme.")
else:
new_object_list = self.antibody_objects + other.antibody_objects
elif isinstance(other, Chain):
if self.numbering_scheme != other.numbering_scheme:
raise ValueError("Concatenation requires Chain object to use "
"the same numbering scheme as ChainCollection.")
else:
new_object_list = self.antibody_objects + [other]
else:
raise ValueError("Concatenation requires other to be of type "
"ChainCollection, got {} instead".format(type(other)))
return ChainCollection(antibody_objects=new_object_list, load=False)
def _split_to_chunks(self, chunk_size=50):
"""
Helper function to split ChainCollection into size chunk_size and returns generator
:param chunk_size: int, size of each chunk
:return: generator to iterate of each chunk of size chunk_size
"""
if self.n_ab > chunk_size:
for x in range(0, self.n_ab, chunk_size):
yield self[range(x, min(x + chunk_size, self.n_ab))]
else:
yield self
def _parse_igblast_query(self, igblast_result, names):
igblast_result_dict = load_igblast_query(igblast_result, names)
# unpack results
for name in names:
obj_i = self.get_object(name=name)
obj_i.germline = igblast_result_dict[name][1]
obj_i.germline_identity = igblast_result_dict[name][0]
def loading_status(self):
return [x.status for x in self.antibody_objects]
def composition(self, method='count'):
"""
Amino acid composition of each sequence. Each resulting list is organised alphabetically (see composition.py)
:param method:
:return:
"""
if method == 'count':
return [order_seq(aa_composition(seq)) for seq in self.sequences]
elif method == 'freq':
return [order_seq(aa_frequency(seq)) for seq in self.sequences]
elif method == 'chou':
return chou_pseudo_aa_composition(self.sequences)
elif method == 'triad':
return triad_method(self.sequences)
elif method == 'hydrophobicity':
return self.hydrophobicity_matrix()
elif method == 'volume':
return side_chain_volume(self.sequences)
else:
raise ValueError("Unknown method")
def distance_matrix(self, feature=None, metric='cosine_similarity', multiprocessing=False):
"""
Returns the distance matrix using a given feature and distance metric
:param feature: string with the name of the feature to use
:param metric: string with the name of the metric to use
:param multiprocessing: bool to turn multiprocessing on/off (True/False)
:return: list of lists with distances between all sequences of len(data) with each list of len(data)
when i==j M_i,j = 0
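        Example (illustrative; ``collection`` stands for an already loaded ChainCollection):
            >>> # pairwise Levenshtein distances over the raw sequences
            >>> d1 = collection.distance_matrix(metric='levenshtein_distance')
            >>> # cosine similarity over amino acid counts
            >>> d2 = collection.distance_matrix(feature='count', metric='cosine_similarity')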
"""
if feature is None:
transformed_data = self.sequences
elif isinstance(feature, str):
# in this case the features are calculated using a predefined featurisation method (see self.composition)
transformed_data = self.composition(method=feature)
elif isinstance(feature, list):
# a user defined list with vectors
if len(feature) != self.n_ab:
raise ValueError("Expected a list of size {}, instead got {}.".format(self.n_ab, len(feature)))
else:
transformed_data = feature
else:
raise TypeError("Unexpected input for feature argument.")
if metric == 'cosine_similarity':
distances = self._run_distance_matrix(transformed_data, cosine_similarity, multiprocessing=multiprocessing)
elif metric == 'cosine_distance':
distances = self._run_distance_matrix(transformed_data, cosine_distance, multiprocessing=multiprocessing)
elif metric == 'hamming_distance':
# be careful hamming distance only works when all sequences have the same length
distances = self._run_distance_matrix(transformed_data, hamming_distance, multiprocessing=multiprocessing)
elif metric == 'levenshtein_distance':
distances = self._run_distance_matrix(transformed_data, levenshtein_distance,
multiprocessing=multiprocessing)
elif metric == 'euclidean_distance':
distances = self._run_distance_matrix(transformed_data, euclidean_distance, multiprocessing=multiprocessing)
elif metric == 'manhattan_distance':
distances = self._run_distance_matrix(transformed_data, manhattan_distance, multiprocessing=multiprocessing)
elif callable(metric):
# user defined metric function
user_function_signature = signature(metric)
# number of params should be two, can take args with defaults though
            default_params = sum(p.default is not p.empty
                                 for p in user_function_signature.parameters.values())
if len(user_function_signature.parameters) - default_params > 2:
raise ValueError("Expected a function with two parameters")
else:
distances = self._run_distance_matrix(transformed_data, metric, multiprocessing=multiprocessing)
else:
raise ValueError("Unknown distance metric.")
return distances
def _run_distance_matrix(self, data, metric, multiprocessing=False):
"""
Helper function to setup the calculation of each entry in the distance matrix
:param data: list with all sequences
:param metric: function that takes two string and calculates distance
:param multiprocessing: bool to turn multiprocessing on/off (True/False)
:return: list of lists with distances between all sequences of len(data) with each list of len(data)
when i==j M_i,j = 0
"""
if multiprocessing:
with Manager() as manager:
cache = manager.dict()
matrix = manager.dict()
jobs = [Process(target=self._distance_matrix,
args=(data, i, metric, cache, matrix)) for i in range(len(data))]
for j in jobs:
j.start()
for j in jobs:
j.join()
# order the data
return [matrix[x] for x in range(len(data))]
else:
cache = Cache(max_cache_size=(len(data) * (len(data) - 1)) / 2)
matrix = Cache(max_cache_size=len(data))
for i in range(len(data)):
cache.update(i, self._distance_matrix(data, i, metric, cache, matrix))
return [matrix[x] for x in range(len(data))]
@staticmethod
def _distance_matrix(data, i, metric, cache, matrix):
"""
Function to calculate distance from the ith sequence of the ith row to the remaining entries in the same row
:param data: list with all sequences
:param i: int that indicates the matrix row being processed
:param metric: function that takes two string and calculates distance
:param cache: either a Manager or Cache object to cache results
:param matrix: either a Manager or Cache object to store results in a matrix
:return: None
"""
row = []
seq_1 = data[i]
for j, seq_2 in enumerate(data):
if i == j:
row.append(0)
continue
keys = ('{}-{}'.format(i, j), '{}-{}'.format(j, i))
            if keys[0] not in cache and keys[1] not in cache:
cache['{}-{}'.format(i, j)] = metric(seq_1, seq_2)
if keys[0] in cache:
row.append(cache[keys[0]])
            elif keys[1] in cache:
                row.append(cache[keys[1]])
else:
raise ValueError("Bug in row {} and column {}".format(i, j))
matrix[i] = row
def load_antibody_object(antibody_object):
antibody_object.load()
return antibody_object
def load_from_antibody_object(antibody_objects, show_progressbar=True, n_threads=20, verbose=True):
"""
Args:
        antibody_objects (list): Chain objects to load
        show_progressbar (bool): display a progress bar while loading
        n_threads (int): number of worker threads used for loading
        verbose (bool): print loading status messages
    Returns:
        tuple: (list of successfully loaded Chain objects, chain type)
"""
if verbose:
print("Loading in antibody objects")
from queue import Queue
import threading
q = Queue()
for i in range(n_threads):
t = threading.Thread(target=worker, args=(q,))
t.daemon = True
t.start()
if show_progressbar:
for antibody_object in tqdm(antibody_objects):
q.put(antibody_object)
else:
for antibody_object in antibody_objects:
q.put(antibody_object)
q.join()
# if show_progressbar:
# aprun = parallelexecutor(use_bar='tqdm', n_jobs=n_jobs, timeout=timeout)
# else:
# aprun = parallelexecutor(use_bar='None', n_jobs=n_jobs, timeout=timeout)
#
# # load in objects in parallel
# antibody_objects = aprun(total=len(antibody_objects))(
# delayed(load_antibody_object)(obj) for obj in antibody_objects)
status = [x.status for x in antibody_objects]
failed = sum([1 if x == 'Not Loaded' or x == 'Failed' else 0 for x in status])
# remove objects that did not load
while 'Not Loaded' in status:
i = status.index('Not Loaded')
del antibody_objects[i], status[i]
while 'Failed' in status:
i = status.index('Failed')
del antibody_objects[i], status[i]
if verbose:
print("Failed to load {} objects in list".format(failed))
loaded_obj_chains = [x.chain for x in antibody_objects if x.status == 'Loaded']
if len(set(loaded_obj_chains)) == 1:
chain = loaded_obj_chains[0]
else:
raise ValueError("All sequences must be of the same chain type: Light or Heavy",
set([x.chain for x in loaded_obj_chains]))
n_ab = len(loaded_obj_chains)
if n_ab == 0:
raise ValueError("Could not find any heavy or light chains in provided file or list of objects")
return antibody_objects, chain
def load_igblast_query(igblast_result, names):
"""
:param names:
:param igblast_result:
:return:
"""
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError("Please install bs4 to parse the IGBLAST html file:"
"pip install beautifulsoup4")
# instantiate BeautifulSoup object to make life easier with the html text!
if isinstance(igblast_result, list):
soup = BeautifulSoup(''.join(igblast_result), "lxml")
else:
soup = BeautifulSoup(igblast_result, "lxml")
# get the results found in <div id="content"> and return the text as a string
results = soup.find(attrs={'id': "content"}).text
# get query names
query = re.compile('Query: (.*)')
query_ids = query.findall(results)
# make sure that all the query names in query are in self.names
if not set(names).issubset(set(query_ids)):
raise ValueError('Make sure that you gave the same names in ChainCollection as you gave'
'in the query submitted to IGBLAST')
# regular expression to get tabular data from each region
all_queries = re.compile('(Query: .*?)\n\n\n\n', re.DOTALL)
# parse the results with regex and get a list with each query data
parsed_results = all_queries.findall(results)
# regex to get the FR and CDR information for each string in parsed results
    region_finder = re.compile(r'^([CDR\d|FR\d|Total].*)', re.MULTILINE)
result_dict = {}
# iterate over each string in parsed result which contains the result for individual queries
for query_result in parsed_results:
# get query name and get the relevant object
query_i = query.findall(query_result)[0]
# check if the query being parsed is part of the object
# (not all queries have to be part of the object, but the object names must be a subset of the queries)
if query_i not in set(names):
continue
# list with CDR and FR info for query result
region_info = region_finder.findall(query_result)
# get the data from region info with dict comprehension
germline_identity = {x.split()[0].split('-')[0]: float(x.split()[-1]) for x in region_info}
# get the top germline assignment
        v_line_assignment = re.compile(r'V\s{}\t.*'.format(query_i))
# the top germline assignment is at the top (index 0)
germline_result = v_line_assignment.findall(results)[0].split()
# store the germline assignment and the bit score in a tuple as the germline attribute of Chain
germline = (germline_result[2], float(germline_result[-2]))
result_dict[query_i] = (germline_identity, germline)
return result_dict
def worker(q):
while True:
item = q.get()
load_antibody_object(item)
q.task_done()
def make_fasta(names, sequences):
file_string = ''
for name, sequence in zip(names, sequences):
file_string += '>{}\n'.format(name)
file_string += '{}\n'.format(sequence)
return file_string
def igblast_options(sequences, domain='imgt',
germline_db_V='IG_DB/imgt.Homo_sapiens.V.f.orf.p',
germline_db_D='IG_DB/imgt.Homo_sapiens.D.f.orf',
                    germline_db_J='IG_DB/imgt.Homo_sapiens.J.f.orf',
num_alignments_V=1, num_alignments_D=1, num_alignments_J=1):
values = {"queryseq": sequences,
"germline_db_V": germline_db_V,
"germline_db_D": germline_db_D,
"germline_db_J": germline_db_J,
"num_alignments_V": str(num_alignments_V),
"num_alignments_D": str(num_alignments_D),
"num_alignments_J": str(num_alignments_J),
"outfmt": "7",
"domain": domain,
"program": "blastp"}
url = "http://www.ncbi.nlm.nih.gov/igblast/igblast.cgi?"
url += parse.urlencode(values)
return url | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/timing/Streamer.js.uncompressed.js | define("dojox/timing/Streamer", ["./_base"], function(){
dojo.experimental("dojox.timing.Streamer");
dojox.timing.Streamer = function(
/* function */input,
/* function */output,
/* int */interval,
/* int */minimum,
/* array */initialData
){
// summary
// Streamer will take an input function that pushes N datapoints into a
// queue, and will pass the next point in that queue out to an
// output function at the passed interval; this way you can emulate
// a constant buffered stream of data.
// input: the function executed when the internal queue reaches minimumSize
// output: the function executed on internal tick
// interval: the interval in ms at which the output function is fired.
// minimum: the minimum number of elements in the internal queue.
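	//	example:
	//	a minimal usage sketch; fillQueue and renderPoint are hypothetical
	//	caller-supplied functions:
	//	|	var s = new dojox.timing.Streamer(fillQueue, renderPoint, 500, 20);
	//	|	s.start();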
var self = this;
var queue = [];
// public properties
this.interval = interval || 1000;
this.minimumSize = minimum || 10; // latency usually == interval * minimumSize
this.inputFunction = input || function(q){ };
this.outputFunction = output || function(point){ };
// more setup
var timer = new dojox.timing.Timer(this.interval);
var tick = function(){
self.onTick(self);
if(queue.length < self.minimumSize){
self.inputFunction(queue);
}
var obj = queue.shift();
while(typeof(obj) == "undefined" && queue.length > 0){
obj = queue.shift();
}
// check to see if the input function needs to be fired
// stop before firing the output function
// TODO: relegate this to the output function?
if(typeof(obj) == "undefined"){
self.stop();
return;
}
// call the output function.
self.outputFunction(obj);
};
this.setInterval = function(/* int */ms){
// summary
// sets the interval in milliseconds of the internal timer
this.interval = ms;
timer.setInterval(ms);
};
this.onTick = function(/* dojox.timing.Streamer */obj){ };
// wrap the timer functions so that we can connect to them if needed.
this.start = function(){
// summary
// starts the Streamer
if(typeof(this.inputFunction) == "function" && typeof(this.outputFunction) == "function"){
timer.start();
return;
}
throw new Error("You cannot start a Streamer without an input and an output function.");
};
this.onStart = function(){ };
this.stop = function(){
// summary
// stops the Streamer
timer.stop();
};
this.onStop = function(){ };
// finish initialization
	timer.onTick = tick;
timer.onStart = this.onStart;
timer.onStop = this.onStop;
if(initialData){
		queue = queue.concat(initialData);
}
};
return dojox.timing.Streamer;
}); | PypiClean |
/MnemoPwd-1.2.1-py3-none-any.whl/mnemopwd/client/uilayer/uicomponents/VertScrollBar.py |
# Copyright (c) 2017, Thierry Lemeunier <thierry at lemeunier dot net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import curses
import math
from .Component import Component
class VertScrollBar(Component):
"""A vertical scrolling bar"""
def __init__(self, parent, h, y, x, colour=False):
"""Initialization of a VertScrollBar instance"""
Component.__init__(self, parent, h, 2, y, x, False)
self.size = 0 # Vertical length of the scrolling bar
self.pos = 0 # Position of the scrolling bar
self.count = 0 # Counter for scrolling up or scrolling down
self.counter = 0 # Counter of 'up' and 'down' movements
self.content_size = 0 # Save content size for adjusting
self.colour = colour
self._create()
def is_actionable(self):
"""See mother class"""
return False
def update(self, content_size):
"""Update scrolling bar size"""
self.content_size = content_size
# Compute scrolling bar length
size = max(1, int(math.floor(self.h * self.h / content_size)))
do_redraw = self.size != size
if size < self.h:
self.size = size # New scrolling bar length
else:
self.size = 0 # No scrolling bar
if do_redraw:
self._create()
def scroll(self, direction):
"""
Try to scrolling up or scrolling down
Scrolling depends on the number of 'up' or 'down' done by user
"""
self.counter += direction # Counter of 'up' and 'down'
do_redraw = self.counter == self.content_size - self.h
if self.size > 0:
self.count += direction
pos = self.pos
if math.fabs(self.count) == math.floor(self.content_size / self.h):
pos += direction
self.count = 0
pos = max(0, pos) # Top limit
pos = min(pos, self.h - self.size) # Bottom limit
do_redraw = pos != self.pos # Redraw if pos has changed
self.pos = pos
if do_redraw:
self._create()
def redraw(self):
"""See the mother class"""
self._create()
def reset(self):
"""Re-initialize attributes"""
self.pos = self.size = self.content_size = self.count = self.counter = 0
def _create(self):
"""Draw the widget content"""
if self.h >= 2:
# Draw standard shape
for i in range(1, self.h - 1):
self.window.addch(i, 0, curses.ACS_VLINE | self.colour) # '|'
# Draw scrolling bar if necessary
if self.size > 0:
end = min(self.pos + self.size, self.h)
for i in range(self.pos, end):
self.window.addch(i, 0, chr(0x2588), self.colour) # '█'
# Draw arrows if necessary
if self.counter > 0:
self.window.addch(0, 0, chr(0x25B2), self.colour) # '▲'
if self.counter < self.content_size - self.h:
self.window.addch(self.h - 1, 0, chr(0x25BC), self.colour) # '▼'
# Finally refresh window
self.window.refresh() | PypiClean |
/Ibidas-0.1.26.tar.gz/Ibidas-0.1.26/ibidas/__init__.py | __all__ = ["Rep","Read", "Write", "Import", "Export", "Connect","_","CyNetwork",'Unpack', "Addformat",
"Array","Tuple","Combine","HArray",
"Stack","Intersect","Union","Except","Difference",
"Pos","Argsort","Rank","IsMissing","CumSum",
"Any","All",
"Max","Min",
"Argmin","Argmax",
"Mean","Median",
"Sum","Prod",
"Count","Match", "Blast", "Join",
"Broadcast","CreateType","MatchType",
"newdim","NEWDIM","LCDIM","LASTCOMMONDIM","COMMON_NAME",'COMMON_POS', "Missing",
"Corr","In","Contains",
"Fetch","Serve","Get","Alg",
"Load","Save",
"Invert","Abs", "Negative","Log","Log2","Log10","Sqrt","Upper","Lower",
"Add","Subtract","Multiply","Modulo","Divide","FloorDivide","And","Or","Xor","Power","Equal","NotEqual","LessEqual","Less","GreaterEqual","Greater","Each",
"Like","SplitOnPattern","HasPattern"
]
from utils import delay_import
from utils.util import save_rep, load_rep, save_csv
from utils.context import _
from utils.missing import Missing
from utils.infix import Infix
from itypes import createType as CreateType, matchType as MatchType
from wrappers.python import Rep
from constants import *
from repops import Detect
import repops_dim
from repops_multi import Broadcast, Combine, Sort
import repops_slice
import repops_funcs
from download_cache import DownloadCache, Unpack
from pre import predefined_sources as Get
from algs import predefined_algs as Alg
from wrappers.cytoscape import CyNetwork
from server import Serve
from constants import *
Fetch = DownloadCache()
In = Infix(repops_funcs.Within)
Contains = Infix(repops_funcs.Contains)
Join = Infix(repops_multi.Join)
Match = Infix(repops_multi.Match)
Blast = Infix(repops_multi.Blast)
Stack = Infix(repops_multi.Stack)
Intersect = Infix(repops_multi.Intersect)
Union = Infix(repops_multi.Union)
Except = Infix(repops_multi.Except)
Difference = Infix(repops_multi.Difference)
Pos = repops.delayable(default_params="#")(repops_funcs.Pos)
IsMissing = repops.delayable()(repops_funcs.IsMissing)
Argsort = repops.delayable()(repops_funcs.Argsort)
Rank = repops.delayable()(repops_funcs.Rank)
CumSum = repops.delayable()(repops_funcs.CumSum)
Argmax = repops.delayable()(repops_funcs.Argmax)
Argmin = repops.delayable()(repops_funcs.Argmin)
Sum = repops.delayable()(repops_funcs.Sum)
Prod = repops.delayable()(repops_funcs.Prod)
Any = repops.delayable()(repops_funcs.Any)
All = repops.delayable()(repops_funcs.All)
Max = repops.delayable()(repops_funcs.Max)
Min = repops.delayable()(repops_funcs.Min)
Mean = repops.delayable()(repops_funcs.Mean)
Median = repops.delayable()(repops_funcs.Median)
Count = repops.delayable()(repops_funcs.Count)
Corr = repops.delayable()(repops_funcs.Corr)
Invert = repops.delayable()(repops_funcs.Invert)
Abs = repops.delayable()(repops_funcs.Abs)
Negative = repops.delayable()(repops_funcs.Negative)
Log = repops.delayable()(repops_funcs.Log)
Log2 = repops.delayable()(repops_funcs.Log2)
Log10 = repops.delayable()(repops_funcs.Log10)
Sqrt = repops.delayable()(repops_funcs.Sqrt)
Upper = repops.delayable()(repops_funcs.Upper)
Lower = repops.delayable()(repops_funcs.Lower)
Like = repops.delayable()(repops_funcs.Like)
SplitOnPattern = repops.delayable()(repops_funcs.SplitOnPattern)
HasPattern = repops.delayable()(repops_funcs.HasPattern)
Add = repops.delayable()(repops_funcs.Add)
Subtract = repops.delayable()(repops_funcs.Subtract)
Multiply = repops.delayable()(repops_funcs.Multiply)
Modulo= repops.delayable()(repops_funcs.Modulo)
Divide = repops.delayable()(repops_funcs.Divide)
FloorDivide = repops.delayable()(repops_funcs.FloorDivide)
And = repops.delayable()(repops_funcs.And)
Or = repops.delayable()(repops_funcs.Or)
Xor = repops.delayable()(repops_funcs.Xor)
Power = repops.delayable()(repops_funcs.Power)
Equal = repops.delayable()(repops_funcs.Equal)
NotEqual = repops.delayable()(repops_funcs.NotEqual)
LessEqual = repops.delayable()(repops_funcs.LessEqual)
Less = repops.delayable()(repops_funcs.Less)
GreaterEqual = repops.delayable()(repops_funcs.GreaterEqual)
Greater = repops.delayable()(repops_funcs.Greater)
Each = repops.delayable()(repops_slice.Each)
HArray = repops.delayable(nsources=UNDEFINED)(repops_slice.HArray)
Tuple = repops.delayable(nsources=UNDEFINED)(repops_slice.Tuple)
Array = repops.delayable()(repops_dim.Array)
##########################################################################
def fimport_tsv(url, **kwargs):
from wrappers.tsv import TSVRepresentor
return TSVRepresentor(url, **kwargs)
def fimport_matrixtsv(url, **kwargs):
from wrappers.matrix_tsv import MatrixTSVRepresentor
return MatrixTSVRepresentor(url, **kwargs)
def fimport_xml(url, **kwargs):
from wrappers.xml_wrapper import XMLRepresentor
return XMLRepresentor(url, **kwargs)
def fimport_psimi(url, **kwargs):
from wrappers.psimi import read_psimi
return read_psimi(url, **kwargs)
def fimport_fasta(url, **kwargs):
from wrappers.fasta import read_fasta;
return read_fasta(url, **kwargs);
def fimport_fastq(url, **kwargs):
from wrappers.fasta import read_fastq;
return read_fastq(url, **kwargs)
def fimport_vcf(url, **kwargs):
from wrappers.vcf import VCFRepresentor
return VCFRepresentor(url, **kwargs)
def fimport_genbank(url, **kwargs):
from wrappers.genbank_embl import GERepresentor
return GERepresentor(url, type='genbank', **kwargs)
def fimport_embl(url, **kwargs):
from wrappers.genbank_embl import GERepresentor
return GERepresentor(url, type='embl', **kwargs)
##########################################################################
def fexport_tsv(data, url, **kwargs):
#from wrappers.tsv import TSVRepresentor
return save_csv(data, url, **kwargs)
def fexport_matrixtsv(data, url, **kwargs):
from wrappers.matrix_tsv import MatrixTSVRepresentor
return MatrixTSVRepresentor(data, url, **kwargs)
def fexport_xml(data, url, **kwargs):
from wrappers.xml_wrapper import XMLRepresentor
return XMLRepresentor(data, url, **kwargs)
def fexport_psimi(data, url, **kwargs):
from wrappers.psimi import write_psimi
return write_psimi(data, url, **kwargs)
def fexport_fasta(data, url, **kwargs):
from wrappers.fasta import write_fasta;
return write_fasta(data, url, **kwargs);
##########################################################################
formats_import = { 'tsv' : fimport_tsv, 'csv' : fimport_tsv,
'tsv_matrix' : fimport_matrixtsv,
'xml' : fimport_xml,
'psimi' : fimport_psimi,
'fasta' : fimport_fasta, 'fa' : fimport_fasta, 'fas' : fimport_fasta,
'fastq' : fimport_fastq,
'vcf': fimport_vcf,
'gbff':fimport_genbank, 'gb': fimport_genbank, 'genbank': fimport_genbank, 'gbk':fimport_genbank, 'embl':fimport_embl,
};
formats_export = { 'tsv' : fexport_tsv, 'csv' : fexport_tsv,
'tsv_matrix' : fexport_matrixtsv,
'xml' : fexport_xml,
'psimi' : fexport_psimi,
'fasta' : fexport_fasta, 'fa' : fexport_fasta, 'fas' : fexport_fasta
};
def Addformat(ext, read_fn, write_fn=None):
formats_import[ext] = read_fn;
formats_export[ext] = write_fn;
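# A minimal sketch of registering a custom format; 'foo', read_foo and write_foo
# are hypothetical placeholders for a real extension and its reader/writer:
#   def read_foo(url, **kwargs): ...   # should return a representor
#   def write_foo(data, url, **kwargs): ...
#   Addformat('foo', read_foo, write_foo)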
def Import(url, **kwargs):
from os.path import splitext;
detect=kwargs.pop('detect', False);
base = url;
while True:
(base, ext) = splitext(base);
ext = ext.split('.')[1] if ext else 'tsv';
format = kwargs.pop('format', ext).lower();
if not format:
raise RuntimeError("Unknown format specified")
if format not in formats_import:
continue;
else:
data = formats_import[format](url, **kwargs);
return data.Detect() if detect else data;
Read = Import
def Export(r, url, **kwargs):
from os.path import splitext;
base = url;
while True:
(base, ext) = splitext(base);
ext = ext.split('.')[1] if ext else 'tsv';
format = kwargs.pop('format', ext).lower();
if not format:
raise RuntimeError("Unknown format specified")
if format not in formats_export:
continue;
else:
return formats_export[format](r, url, **kwargs);
Write = Export;
def Connect(url, **kwargs):
format = kwargs.pop('format','db')
if(format == "db"):
from wrappers.sql import open_db
return open_db(url, **kwargs)
else:
raise RuntimeError("Unknown format specified")
def Save(r, filename, **kwargs):
if filename.endswith('tsv') or filename.endswith('csv') or filename.endswith('tab'):
save_csv(r, filename, **kwargs);
else:
save_rep(r, filename, **kwargs);
def Load(filename,**kwargs):
if filename.endswith('tsv') or filename.endswith('csv') or filename.endswith('tab'):
from wrappers.tsv import TSVRepresentor
return TSVRepresentor(filename, **kwargs)
else:
return load_rep(filename)
delay_import.perform_delayed_imports() | PypiClean |
/Divisi-0.6.10.tar.gz/Divisi-0.6.10/csc/divisi/blend.py | from csc.divisi.tensor import DictTensor, Tensor
from csc.divisi.ordered_set import OrderedSet
from csc.divisi.labeled_view import LabeledView
from csc.divisi.normalized_view import MeanSubtractedView
from itertools import chain, izip
import logging
from math import sqrt, ceil
def partial_list_repr(lst, max_len):
if len(lst) <= max_len:
return repr(lst)
else:
return u'[%s, ... (%d total)]' % (
', '.join(repr(item) for item in lst[:max_len]),
len(lst))
class Blend(LabeledView):
def __init__(self, tensors, weights=None, factor=None, k_values=1, svals=None, auto_build_tensor=True):
'''
Create a new Blend from a list of tensors.
tensors : [Tensor]
a list of tensors to blend
weights : [float]
how much to weight each tensor
factor : float
the blending factor, only valid if len(tensors)==2. weights=[1-factor, factor].
k_values : int or [int]
number of singular values to consider for each matrix's variance
svals : [[float]]
If you know the svals of any of the tensors, pass them in here. Use ``None``
or ``[]`` if you don't know a value.
Various optimizations are possible if keys never overlap. This
case is automatically detected -- though it may be overly
cautious.
'''
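        # Minimal usage sketch (t1 and t2 are hypothetical 2-D labeled tensors):
        #   blend = Blend({'background': t1, 'domain': t2}, factor=0.5)
        #   results = blend.svd(k=50)
        # When neither weights nor factor is given, the "rough blend" weights
        # (1/sqrt of the sum of squared top singular values per tensor) are
        # used and then normalized to sum to 1.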
self.logger = logging.getLogger('csc.divisi.Blend')
self.k_values = k_values
self.tensors = tensors
self._svals = svals
# Can't call __init__ for either LabeledView or View 's init,
# because they expect the tensor to be passed.
#View.__init__(self)
if factor is not None:
if weights is not None:
raise TypeError('Cannot simultaneously specify factor and weights.')
self.factor = factor
else:
self.weights = weights
self.auto_build_tensor = auto_build_tensor
def __repr__(self):
return u'<Blend of %s, weights=%s>' % (partial_list_repr(self.names, 3), partial_list_repr(self.weights, 3))
def __getstate__(self):
return dict(
version=1,
tensors=self.tensors,
weights=self.weights,
k_values=self.k_values,
svals=self._svals,
auto_build_tensor=self.auto_build_tensor)
def __setstate__(self, state):
version = state.pop('version', 1)
if version > 1:
raise TypeError('Blend pickle was created by a newer version.')
self.logger = logging.getLogger('csc.divisi.Blend')
self.tensors = state['tensors']
self.k_values = state.get('k_values', 1)
self._svals = state.get('svals', None)
self.weights = state['weights']
self.auto_build_tensor = state.get('auto_build_tensor', True)
def bake(self):
'''
Return a normal LabeledView with the current contents of the blend.
'''
if self._tensor is None: self.build_tensor()
return LabeledView(self.tensor, self._labels)
def _set_tensors(self, tensors):
'''
Set the input tensors. Computes the label lists also. You
should not call this function directly; rather, assign to
blend.tensors.
You can pass a ``dict`` or sequence of ``(label, tensor)``
pairs; the tensors will be labeled according to the keys.
'''
if isinstance(tensors, Tensor):
raise TypeError('Give Blend a _list_ (or dict or whatever) of tensors.')
if hasattr(tensors, 'items'):
# Extract the items, if we have some.
tensors = tensors.items()
if isinstance(tensors[0], (list, tuple)):
# Assign names. Don't call `dict()`, in case a sequence
# was passed and two tensors have the same label.
names, tensors = zip(*tensors)
else:
names = map(repr, tensors)
for tensor in tensors:
if tensor.stack_contains(MeanSubtractedView):
raise TypeError("You can't blend MeanSubtractedViews. Try mean-subtracting the resulting blend.")
self._tensors = tuple(tensors)
self.names = tuple(names)
self.logger.info('tensors: %s', ', '.join(self.names))
self.ndim = ndim = tensors[0].ndim
if not all(tensor.ndim == ndim for tensor in tensors):
raise TypeError('Blended tensors must have the same dimensionality.')
self.logger.info('Making ordered sets')
self._labels = labels = [OrderedSet() for _ in xrange(ndim)]
self.label_overlap = label_overlap = [0]*ndim
for tensor in self._tensors:
for dim, label_list in enumerate(labels):
for key in tensor.label_list(dim):
# XXX(kcarnold) This checks containment twice.
if key in label_list: label_overlap[dim] += 1
else: label_list.add(key)
self._shape = tuple(map(len, labels))
self._keys_never_overlap = not all(label_overlap)
self.logger.info('Done making ordered sets. label_overlap: %r', label_overlap)
if not any(label_overlap):
self.logger.warn('No labels overlap.')
# Invalidate other data
self._weights = self._tensor = self._svals = None
tensors = property(lambda self: self._tensors, _set_tensors)
@property # necessary because it's a property on the parent class
def shape(self): return self._shape
def tensor_svals(self, tensor_idx, num_svals):
'''
Get the top num_svals singular values for one of the input tensors.
'''
if self._svals is None: self._svals = [[]]*len(self._tensors)
if num_svals > len(self._svals[tensor_idx] or []):
self.logger.info('computing SVD(k=%d) for %s', num_svals, self.names[tensor_idx])
self._svals[tensor_idx] = self._tensors[tensor_idx].svd(k=num_svals).svals.values()
return self._svals[tensor_idx][:num_svals]
def rough_weight(self, tensor_idx):
'''
Compute the rough weight for one of the input tensors.
'''
k = self.k_values
if isinstance(k, (list, tuple)): k = k[tensor_idx]
return 1.0/sqrt(sum([x*x for x in self.tensor_svals(tensor_idx, k)[:k]]))
def _set_weights(self, weights):
if weights is None:
# Rough blend
self._weights = [self.rough_weight(tensor) for tensor in xrange(len(self.tensors))]
self.normalize_weights()
elif weights == '=':
# Equal weights, summing to 1
self._weights = [1]*len(self.tensors)
self.normalize_weights()
elif isinstance(weights, (int, long, float)):
# Equal weights of the given value
self._weights = [float(weights)]*len(self.tensors)
else:
# Explicit
if weights == self._weights: return # If same, no-op.
if len(weights) != len(self._tensors):
raise TypeError('Weight length mismatch')
self._weights = tuple(weights)
self._tensor = None # invalidate the tensor
weights = property(lambda self: self._weights, _set_weights)
def _get_factor(self):
if len(self._tensors) != 2:
raise TypeError('Only blends of 2 tensors have a single factor.')
return self._weights[1]
def _set_factor(self, factor):
if len(self._tensors) != 2:
raise TypeError('Only blends of 2 tensors have a single factor.')
if not 0 <= factor <= 1:
raise ValueError('factor must be between 0 and 1.')
self.weights = [1.0-factor, float(factor)]
factor = property(_get_factor, _set_factor)
def normalize_weights(self):
'''
Make the weights sum to 1.
'''
self.logger.info('Normalizing weights')
scale = 1.0 / float(sum(self._weights))
self._weights = tuple(factor * scale for factor in self._weights)
@property
def tensor(self):
if self._tensor is None:
if not self.auto_build_tensor:
raise TypeError("Tensor not yet built. Run 'build_tensor'.")
self.build_tensor()
return self._tensor
def build_tensor(self, tensor=None):
'''
Build the combined tensor. Done explicitly because it's slow.
If `tensor` is not None, it is used as the underlying numeric
storage tensor. It should have the same number of dimensions
as the blend. It defaults to a new DictTensor.
'''
self.logger.info('building combined tensor.')
labels = self._labels
if tensor is None: tensor = DictTensor(ndim=self.ndim)
assert tensor.ndim == self.ndim
if self._keys_never_overlap:
self.logger.info('fast-merging.')
tensor.update((tuple(label_list.index(label) for label_list, label in izip(labels, key)), val)
for key, val in self._fast_iteritems())
else:
for factor, cur_tensor, name in zip(self._weights, self._tensors, self.names):
self.logger.info('slow-merging %s' % name)
for key, val in cur_tensor.iteritems():
tensor.inc(tuple(label_list.index(label) for label_list, label in izip(labels, key)), factor*val)
self._tensor = tensor
self.logger.info('done building tensor.')
def svd(self, *a, **kw):
'''
Computes the SVD of the blend. Builds the tensor if necessary
and it is not yet built.
When the keys never overlap, this uses an optimized routine.
'''
if not self._keys_never_overlap or self._tensor is not None:
# Slow case
self.logger.info('Non-optimized svd')
if self._tensor is None: self.build_tensor()
return super(Blend, self).svd(*a, **kw)
# No overlap, so iteritems is straightforward. Exploit that
# for some speed.
from csc.divisi.svd import svd_sparse
from csc.divisi.labeled_view import LabeledSVD2DResults
self.logger.info('Optimized svd')
_svd = svd_sparse(self.fake_tensor(), *a, **kw)
return LabeledSVD2DResults.layer_on(_svd, self)
# Optimizations
def fake_tensor(self):
'''
Return a tensor that only knows how to do iteritems. But fast.
Used for :meth:`svd`.
'''
if not self._keys_never_overlap:
raise TypeError('Can only get a fake tensor if keys never overlap.')
length = len(self)
class FakeTensor(object):
ndim = self.ndim
shape = self.shape
def __len__(ft):
return length
def iteritems(ft):
labels = self._labels
for factor, cur_tensor in zip(self._weights, self._tensors):
for key, val in cur_tensor.iteritems():
yield (tuple(label_list.index(label) for label_list, label in izip(labels, key)),
factor*val)
def _svd(ft, *a, **kw):
from csc.divisi._svdlib import svd
return svd(ft, *a, **kw)
return FakeTensor()
def __iter__(self):
if self._keys_never_overlap:
return chain(*self.tensors)
else:
return (self.labels(idx) for idx in self.tensor)
def _fast_iteritems(self):
return ((key, factor*val)
for factor, cur_tensor in zip(self._weights, self._tensors)
for key, val in cur_tensor.iteritems())
def iteritems(self):
if self._keys_never_overlap:
return self._fast_iteritems()
else:
return super(Blend, self).iteritems()
def __len__(self):
if self._keys_never_overlap:
return sum(map(len, self.tensors))
else:
return len(self.tensor)
# Visualization
def coverage(self, bin_size=50):
'''
Compute the coverage of the blend space by the input tensors.
Returns NumPy 2D arrays ``(fill, magnitude, src)``. ``fill``
indicates how densely filled each "bin" is, from 0.0 (empty)
to 1.0 (full). ``magnitude`` accumulates the absolute values
of the items within the bin. ``src`` indicates which tensor
each item comes from, specified by its index in the
``tensors`` array. (If multiple tensors write in the same
bin, the last one wins.)
'''
if not isinstance(bin_size, (list, tuple)):
bin_size = [bin_size]*self.ndim
import numpy
        src = numpy.zeros(tuple(int(numpy.ceil(float(items) / float(bins)))
                                for items, bins in izip(self.shape, bin_size)),
                          dtype=numpy.uint8)
magnitude = numpy.zeros(src.shape)
fill = numpy.zeros(src.shape)
inc = 1.0 / numpy.product(bin_size)
# This loop should look a lot like the one in FakeTensor.
labels = self._labels
for tensor_idx, tensor in enumerate(self._tensors):
for key, val in tensor.iteritems():
idx = tuple(label_list.index(label) // bins for label_list, label, bins in izip(labels, key, bin_size))
src[idx] = tensor_idx
fill[idx] += inc
magnitude[idx] += abs(val)
return fill, magnitude, src
def coverage_image(self, width=None, height=None, pixel_size=None, *a, **kw):
'''
Generate a coverage image of this blend. You can specify the size of the image in one of two ways:
``pixel_size``: the size of a pixel in rows and columns
(defaults to square if a single number is passed)
``width`` and/or ``height``: the target width and height of
the image. If it doesn't fit evenly, the image may be
slightly bigger than you specify. Defaults to square pixels
if one or the other is unspecified.
Or if you give no parameters, the width defaults to 1000 pixels.
For more information, see ``csc.divisi.blend.CoverageImage``.
'''
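        # Usage sketch (saving requires PIL; file names are illustrative):
        #   img = blend.coverage_image(width=800)
        #   img.save('coverage.png')
        #   img.save_pdf('coverage.pdf')   # includes a colour legend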
# Compute the image size.
if pixel_size is None and width is None and height is None:
# Default to 1000 pixels wide.
width = 1000
if pixel_size is None:
# Compute the dimensions that are specified.
pixel_width = pixel_height = None
if width is not None:
pixel_width = int(ceil(float(self.shape[1]) / width))
if height is not None:
pixel_height = int(ceil(float(self.shape[0]) / height))
# Fill in, defaulting to square.
pixel_size = (pixel_height if pixel_height is not None else pixel_width,
pixel_width if pixel_width is not None else pixel_height)
else:
if width is not None or height is not None:
raise TypeError("Can't specify both pixel_size and width/height.")
self.logger.debug('Making coverage image with pixel_size=%r', pixel_size)
# Generate the raw coverage data.
fill, magnitude, src = self.coverage(pixel_size)
return CoverageImage(fill, magnitude, src, self.names, *a, **kw)
# Blend analysis utilities
def predicted_svals(self, num=50, for_each_tensor=None, track_origin=False):
'''
Predict the resulting singular values by multiplying the
original singular values by the corresponding blend factor and
sorting.
Parameters
----------
num : int
Total number of svals to return
for_each_tensor : int, optional
number of singular values to consider for each tensor. If this is
too small, some extraneous svals may make it into the top `num`.
If not given, values `num` are considered.
track_origin : boolean, default False
If true, returns a list of (sval, tensor_idx).
'''
if for_each_tensor is None: for_each_tensor = num
if track_origin:
elt = lambda sval, factor, idx: (sval*factor, idx)
else:
elt = lambda sval, factor, idx: sval*factor
svals = [elt(sval, factor, idx)
for idx, factor in enumerate(self.weights)
for sval in self.tensor_svals(idx, for_each_tensor)]
svals.sort(reverse=True)
return svals[:num]
def total_veering(self, num=50, for_each_tensor=None, actual_svals=None):
'''
Calculate total veering.
If you already have the singular values, pass them in as a list / array
for a faster result.
'''
predicted_svals = self.predicted_svals(num, for_each_tensor)
if actual_svals is None:
self.logger.info('computing actual singular values')
actual_svals = self.tensor.svd(num).svals.values()
num = min(num, len(actual_svals))
return sum((actual_svals[idx] - predicted_svals[idx][0])**2
for idx in xrange(num))
def total_veering_at_factor(self, factor, **kw):
"Calculates the total veering at a particular factor."
return self.at_factor(factor).total_veering(**kw)
def predicted_svals_at_factor(self, factor, **kw):
return self.at_factor(factor).predicted_svals(**kw)
def svals_at_factor(self, factor, *a, **kw):
return self.at_factor(factor).svd(*a, **kw).svals.values()
def at_factor(self, factor):
# FIXME: take advantage of the fact that the labels don't change.
return Blend(self.tensors, factor=factor,
k_values=self.k_values, svals=self._svals)
def compressed_svd_u(self, k=100):
"""
Not done yet. --Rob
"""
labelset = set()
        for t in self.tensors:
            labelset |= set(t.label_list(0))
        ulabels = OrderedSet(list(labelset))
        svds = [t.svd(k) for t in self.tensors]
class CoverageImage(object):
def __init__(self, fill, magnitude, src, names):
'''
Create a coverage image.
Each input gets a color. Intensity indicates density.
If you have PIL, you can call ``.save(filename)`` on the
resulting object to save an image. Otherwise, there will be a
plain NumPy array at ``.arr``.
'''
from colorsys import hsv_to_rgb
import numpy
# Create the hues array.
n = len(names)
hues = numpy.linspace(0, 1, n+1)
# Re-order the hues for the maximum separation between adjacent items.
increment = (n+1)/2.0
hues = [hues[int(i*increment) % n] for i in xrange(n)]
rows, cols = src.shape
# Scale "fill" values to the dynamic range.
fill_scale = self.fill_scale = 1.0/fill.max()
## Scale "magnitude" values to the dynamic range. Avoid having
## too low saturation, so we set a minimum.
#min_saturation = 0.5
#magnitude_scale = self.magnitude_scale = (1.0-min_saturation)/magnitude.max()
# Create an empty white image.
img = numpy.zeros((rows, cols, 3), dtype=numpy.uint8)
img[:,:,:] = 255
# Fill it.
for row in xrange(rows):
for col in xrange(cols):
idx = row, col
fill_amt = fill[idx]
if not fill_amt: continue # skip if empty.
rgb = numpy.array(hsv_to_rgb(hues[src[idx]],
1,#magnitude[idx] * magnitude_scale + min_saturation,
1-fill_amt*fill_scale))
img[row, col, :] = rgb*255
self.names = names
self.arr = img
self.hues = hues
@property
def img(self):
import Image
return Image.fromarray(self.arr)
def save(self, filename, *a, **kw):
return self.img.save(filename, *a, **kw)
@property
def colors(self):
from colorsys import hsv_to_rgb
return [hsv_to_rgb(hue, 1, 1) for hue in self.hues]
def save_pdf(self, filename, margins=(1,1)):
'''
Make and save a PDF of this coverage plot, including a legend.
Margins are expressed in inches: (top-bottom, left-right).
'''
from reportlab.lib.units import inch
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen.canvas import Canvas
c = Canvas(filename, pagesize=letter)
# Compute margins.
margin_top, margin_left = margins
margin_top *= inch; margin_left *= inch
whole_page_width, whole_page_height = letter
page_top = whole_page_height - margin_top
page_left = margin_left
page_width = whole_page_width - 2*margin_left
# Show the main image.
image = self.img
image_width = page_width
image_height = image_width / image.size[0] * image.size[1]
image_x = page_left
image_y = page_top - image_height
c.drawInlineImage(image, image_x, image_y, width=image_width, height=image_height)
# Draw legends beneath the image.
textobject = c.beginText()
textobject.setTextOrigin(page_left, image_y - .5*inch)
textobject.setFont('Helvetica', 14)
for name, color in izip(self.names, self.colors):
textobject.setFillColorRGB(*color)
textobject.textLine(name)
c.drawText(textobject)
# Done.
c.showPage()
c.save() | PypiClean |
/Elpotrero-1.6.2.tar.gz/Elpotrero-1.6.2/elpotrero/_files/tree/scripts/readmes/install.README | Make sure to set remotehost.prd in /etc/hosts file, otherwise you are going to be trying to connect to it and not getting the right server address.
You should also temporarily set up remotehost.com as well while you are waiting for the DNS file to be propagated through the system, just make sure to change it back to normal later!
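For example, the entry could look roughly like this (the IP below is only a placeholder):
203.0.113.10    remotehost.prd remotehost.com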
Useful link: https://help.ubuntu.com/community/AptGet/Howto#Commands
https://help.ubuntu.com/community/AddUsersHowto
This is a really great tutorial on setting up a django/gunicorn/nginx stack. Very thorough
http://michal.karzynski.pl/blog/2013/06/09/django-nginx-gunicorn-virtualenv-supervisor/
sudo useradd riendas -m -s /bin/bash
sudo passwd riendas
sudo addgroup webdev
sudo adduser riendas webdev
-----------------------------------
SUDOERS - DO THIS SO YOU CAN PROPERLY EXECUTE PYTHON AS "sudo":
https://help.ubuntu.com/community/Sudoers#Editing_the_sudoers_file
edit the /home/user/.bashrc file with the following addition:
export EDITOR="vim"
sudo -E visudo
add the following in the file:
Defaults env_keep += "PYTHONPATH"
Defaults editor=/usr/bin/vim"
riendas ALL=(ALL:ALL) ALL
---------------------------------------
https://help.ubuntu.com/lts/serverguide/mysql.html
sudo apt-get install mysql-server
sudo apt-get install libmysqlclient-dev
https://www.digitalocean.com/community/tutorials/how-to-install-and-use-postgresql-on-ubuntu-14-04
sudo apt-get install postgresql
sudo apt-get install postgresql-server-dev-X.Y
https://help.ubuntu.com/community/Mercurial
sudo apt-get install mercurial meld
****READ THE NOTES ON NGINX INSTALLATION******
sometimes there is a problem with installing nginx, because ubuntu by default installs the nginx-core, and what you want is
nginx-extras (or nginx-full, didn't test). If you don't use the proper nginx install, the module you need to work with django
will not be there.
https://www.digitalocean.com/community/articles/how-to-install-nginx-on-ubuntu-12-04-lts-precise-pangolin
sudo apt-get install nginx
sudo service nginx start
sudo apt-get install bind9
sudo apt-get install dnsutils
GO CHECK OUT readme_bind for the rest!!!!!!!
you'll need to create this directory, because the new bind install doesn't create it:
mkdir /etc/bind/zones
https://nicolas.perriault.net/code/2012/gandi-standard-ssl-certificate-nginx/
https://help.ubuntu.com/community/FilePermissions
sudo mkdir /var/www
sudo chgrp webdev /var/www
sudo chmod -R 775 /var/www
NOTE: when installing python, you want the python-dev package so you can compile the python-mysql code
sudo apt-get install python
sudo apt-get install python-dev
sudo apt-get install python-pip
sudo apt-get install supervisor
http://virtualenvwrapper.readthedocs.org/en/latest/
sudo pip install virtualenvwrapper
Note: this is to get PIL working properly. You must install these:
apt-get install libjpeg-dev
apt-get install zlib1g-dev
apt-get install libpng12-dev
set up ssh so you can easily log in:
http://www.thegeekstuff.com/2008/11/3-steps-to-perform-ssh-login-without-password-using-ssh-keygen-ssh-copy-id/
http://www.thegeekstuff.com/2010/04/how-to-fix-offending-key-in-sshknown_hosts-file/
NOTE: ssh-copy-id is not always available. Go here to get it: http://stackoverflow.com/questions/15185566/usr-bin-ssh-copy-id-line-1-ucgi-command-not-found
Here are the commands from that site:
sudo curl "hg.mindrot.org/openssh/raw-file/c746d1a70cfa/contrib/ssh-copy-id" -o /usr/bin/ssh-copy-id
sudo chmod +x /usr/bin/ssh-copy-id
****UPDATE****
ssh-copy-id is now a part of the openssh-client package. So just install it:
sudo apt-get install openssh-client
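For reference, the usual passwordless-login sequence is roughly (adjust the user and host to your server):
ssh-keygen -t rsa
ssh-copy-id [email protected]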
in case you have to replace an offending ssh key:
--on linux--
ssh-keygen -f "/home/ronny/.ssh/known_hosts" -R dosriendas.com
--on a mac--
ssh-keygen -f "/Users/ronny/.ssh/known_hosts" -R dosriendas.com
Set up bitbucket:
https://confluence.atlassian.com/pages/viewpage.action?pageId=270827678
At this point you probably don't have an ssh key on the gandi server (I'm assuming a clean install here) so make sure you ssh-keygen
After you've done all that, and you've registered the new ssh key with bitbucket, find the "clone" option on the repository you want and pull it:
in the case of dosriendas you'll need these files:
hg clone ssh://[email protected]/ronnyabraham/elpotrero
hg clone ssh://[email protected]/ronnyabraham/django_initialize_project
hg clone ssh://[email protected]/ronnyabraham/dosriendas.prj
NOTE: keep in mind that the *.sh files are BASH files. DO NOT USE the "sh" command to execute them! Use "bash"!!!
NOTE: e.g. bash command.sh
bash bootstrap.sh
python publicdirectory.py
sudo python installconfs.py
mkdir /var/www/PROJECT.DOMAIN/logs/django
GEM_HOME=$ENVDIR (check out gunicorn.sh)
sudo apt-get install ruby
sudo apt-get install rubygems
http://honza.ca/2011/06/install-ruby-gems-into-virtualenv
NOTE: they say I should install those commands in the postactivate script. TRY TO SET IT UP
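A sketch of what that postactivate script could contain (paths are assumptions, check the link above):
export GEM_HOME="$VIRTUAL_ENV/gems"
export PATH="$GEM_HOME/bin:$PATH"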
gem install sass
gem install compass
gem install zen-grids
NOTE: also install forward from forwardhq.com
gem install forward
http://stackoverflow.com/questions/13257431/django-mediagenerator-cant-find-sass
you have to set the compass files up for django-mediagenerator
python importsassframeworks | PypiClean |
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/bower_components/prism/components/prism-textile.js | (function(Prism) {
// We don't allow for pipes inside parentheses
// to not break table pattern |(. foo |). bar |
var modifierRegex = '(?:\\([^|)]+\\)|\\[[^\\]]+\\]|\\{[^}]+\\})+';
var modifierTokens = {
'css': {
pattern: /\{[^}]+\}/,
inside: {
rest: Prism.languages.css
}
},
'class-id': {
pattern: /(\()[^)]+(?=\))/,
lookbehind: true,
alias: 'attr-value'
},
'lang': {
pattern: /(\[)[^\]]+(?=\])/,
lookbehind: true,
alias: 'attr-value'
},
// Anything else is punctuation (the first pattern is for row/col spans inside tables)
'punctuation': /[\\\/]\d+|\S/
};
Prism.languages.textile = Prism.languages.extend('markup', {
'phrase': {
pattern: /(^|\r|\n)\S[\s\S]*?(?=$|\r?\n\r?\n|\r\r)/,
lookbehind: true,
inside: {
// h1. Header 1
'block-tag': {
pattern: RegExp('^[a-z]\\w*(?:' + modifierRegex + '|[<>=()])*\\.'),
inside: {
'modifier': {
pattern: RegExp('(^[a-z]\\w*)(?:' + modifierRegex + '|[<>=()])+(?=\\.)'),
lookbehind: true,
inside: Prism.util.clone(modifierTokens)
},
'tag': /^[a-z]\w*/,
'punctuation': /\.$/
}
},
// # List item
// * List item
'list': {
pattern: RegExp('^[*#]+(?:' + modifierRegex + ')?\\s+.+', 'm'),
inside: {
'modifier': {
pattern: RegExp('(^[*#]+)' + modifierRegex),
lookbehind: true,
inside: Prism.util.clone(modifierTokens)
},
'punctuation': /^[*#]+/
}
},
// | cell | cell | cell |
'table': {
// Modifiers can be applied to the row: {color:red}.|1|2|3|
// or the cell: |{color:red}.1|2|3|
pattern: RegExp('^(?:(?:' + modifierRegex + '|[<>=()^~])+\\.\\s*)?(?:\\|(?:(?:' + modifierRegex + '|[<>=()^~_]|[\\\\/]\\d+)+\\.)?[^|]*)+\\|', 'm'),
inside: {
'modifier': {
// Modifiers for rows after the first one are
// preceded by a pipe and a line feed
pattern: RegExp('(^|\\|(?:\\r?\\n|\\r)?)(?:' + modifierRegex + '|[<>=()^~_]|[\\\\/]\\d+)+(?=\\.)'),
lookbehind: true,
inside: Prism.util.clone(modifierTokens)
},
'punctuation': /\||^\./
}
},
'inline': {
pattern: RegExp('(\\*\\*|__|\\?\\?|[*_%@+\\-^~])(?:' + modifierRegex + ')?.+?\\1'),
inside: {
// Note: superscripts and subscripts are not handled specifically
// *bold*, **bold**
'bold': {
pattern: RegExp('(^(\\*\\*?)(?:' + modifierRegex + ')?).+?(?=\\2)'),
lookbehind: true
},
// _italic_, __italic__
'italic': {
pattern: RegExp('(^(__?)(?:' + modifierRegex + ')?).+?(?=\\2)'),
lookbehind: true
},
// ??cite??
'cite': {
pattern: RegExp('(^\\?\\?(?:' + modifierRegex + ')?).+?(?=\\?\\?)'),
lookbehind: true,
alias: 'string'
},
// @code@
'code': {
pattern: RegExp('(^@(?:' + modifierRegex + ')?).+?(?=@)'),
lookbehind: true,
alias: 'keyword'
},
// +inserted+
'inserted': {
pattern: RegExp('(^\\+(?:' + modifierRegex + ')?).+?(?=\\+)'),
lookbehind: true
},
// -deleted-
'deleted': {
pattern: RegExp('(^-(?:' + modifierRegex + ')?).+?(?=-)'),
lookbehind: true
},
// %span%
'span': {
pattern: RegExp('(^%(?:' + modifierRegex + ')?).+?(?=%)'),
lookbehind: true
},
'modifier': {
pattern: RegExp('(^\\*\\*|__|\\?\\?|[*_%@+\\-^~])' + modifierRegex),
lookbehind: true,
inside: Prism.util.clone(modifierTokens)
},
'punctuation': /[*_%?@+\-^~]+/
}
},
// [alias]http://example.com
'link-ref': {
pattern: /^\[[^\]]+\]\S+$/m,
inside: {
'string': {
pattern: /(\[)[^\]]+(?=\])/,
lookbehind: true
},
'url': {
pattern: /(\])\S+$/,
lookbehind: true
},
'punctuation': /[\[\]]/
}
},
// "text":http://example.com
// "text":link-ref
'link': {
pattern: RegExp('"(?:' + modifierRegex + ')?[^"]+":.+?(?=[^\\w/]?(?:\\s|$))'),
inside: {
'text': {
pattern: RegExp('(^"(?:' + modifierRegex + ')?)[^"]+(?=")'),
lookbehind: true
},
'modifier': {
pattern: RegExp('(^")' + modifierRegex),
lookbehind: true,
inside: Prism.util.clone(modifierTokens)
},
'url': {
pattern: /(:).+/,
lookbehind: true
},
'punctuation': /[":]/
}
},
// !image.jpg!
// !image.jpg(Title)!:http://example.com
'image': {
pattern: RegExp('!(?:' + modifierRegex + '|[<>=()])*[^!\\s()]+(?:\\([^)]+\\))?!(?::.+?(?=[^\\w/]?(?:\\s|$)))?'),
inside: {
'source': {
pattern: RegExp('(^!(?:' + modifierRegex + '|[<>=()])*)[^!\\s()]+(?:\\([^)]+\\))?(?=!)'),
lookbehind: true,
alias: 'url'
},
'modifier': {
pattern: RegExp('(^!)(?:' + modifierRegex + '|[<>=()])+'),
lookbehind: true,
inside: Prism.util.clone(modifierTokens)
},
'url': {
pattern: /(:).+/,
lookbehind: true
},
'punctuation': /[!:]/
}
},
// Footnote[1]
'footnote': {
pattern: /\b\[\d+\]/,
alias: 'comment',
inside: {
'punctuation': /\[|\]/
}
},
// CSS(Cascading Style Sheet)
'acronym': {
pattern: /\b[A-Z\d]+\([^)]+\)/,
inside: {
'comment': {
pattern: /(\()[^)]+(?=\))/,
lookbehind: true
},
'punctuation': /[()]/
}
},
// Prism(C)
'mark': {
pattern: /\b\((?:TM|R|C)\)/,
alias: 'comment',
inside: {
'punctuation':/[()]/
}
}
}
}
});
var nestedPatterns = {
'inline': Prism.util.clone(Prism.languages.textile['phrase'].inside['inline']),
'link': Prism.util.clone(Prism.languages.textile['phrase'].inside['link']),
'image': Prism.util.clone(Prism.languages.textile['phrase'].inside['image']),
'footnote': Prism.util.clone(Prism.languages.textile['phrase'].inside['footnote']),
'acronym': Prism.util.clone(Prism.languages.textile['phrase'].inside['acronym']),
'mark': Prism.util.clone(Prism.languages.textile['phrase'].inside['mark'])
};
// Only allow alpha-numeric HTML tags, not XML tags
Prism.languages.textile.tag.pattern = /<\/?(?!\d)[a-z0-9]+(?:\s+[^\s>\/=]+(?:=(?:("|')(?:\\[\s\S]|(?!\1)[^\\])*\1|[^\s'">=]+))?)*\s*\/?>/i;
// Allow some nesting
Prism.languages.textile['phrase'].inside['inline'].inside['bold'].inside = nestedPatterns;
Prism.languages.textile['phrase'].inside['inline'].inside['italic'].inside = nestedPatterns;
Prism.languages.textile['phrase'].inside['inline'].inside['inserted'].inside = nestedPatterns;
Prism.languages.textile['phrase'].inside['inline'].inside['deleted'].inside = nestedPatterns;
Prism.languages.textile['phrase'].inside['inline'].inside['span'].inside = nestedPatterns;
// Allow some styles inside table cells
Prism.languages.textile['phrase'].inside['table'].inside['inline'] = nestedPatterns['inline'];
Prism.languages.textile['phrase'].inside['table'].inside['link'] = nestedPatterns['link'];
Prism.languages.textile['phrase'].inside['table'].inside['image'] = nestedPatterns['image'];
Prism.languages.textile['phrase'].inside['table'].inside['footnote'] = nestedPatterns['footnote'];
Prism.languages.textile['phrase'].inside['table'].inside['acronym'] = nestedPatterns['acronym'];
Prism.languages.textile['phrase'].inside['table'].inside['mark'] = nestedPatterns['mark'];
}(Prism)); | PypiClean |
/NNGT-2.7.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl/nngt/geometry/svgtools.py |
from copy import deepcopy
from itertools import chain
from xml.dom.minidom import parse
from svg.path import (parse_path, CubicBezier, QuadraticBezier, Arc,
Move, Close, Path)
import shapely
from shapely.affinity import scale, affine_transform, translate
from shapely.geometry import Point, Polygon
import numpy as np
from .shape import Shape
'''
Shape generation from SVG files.
'''
__all__ = ["polygons_from_svg"]
# predefined svg shapes and their parameters
_predefined = {
'path': None,
'ellipse': ("cx", "cy", "rx", "ry"),
'circle': ("cx", "cy", "r"),
'rect': ("x", "y", "width", "height")
}
_valid_nodes = _predefined.keys()
def polygons_from_svg(filename, interpolate_curve=50, parent=None,
return_points=False):
    '''
    Generate :class:`shapely.geometry.Polygon` objects from an SVG file.

    Parameters
    ----------
    filename : str
        Path to the SVG file.
    interpolate_curve : int, optional (default: 50)
        Number of points used to approximate each curved segment.
    parent : optional (default: None)
        Optional parent shape, passed through to the polygon construction.
    return_points : bool, optional (default: False)
        If True, also return the points found for each SVG element type.
    '''
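    # Usage sketch ('layout.svg' is a hypothetical file containing only closed shapes):
    #   polygons = polygons_from_svg('layout.svg')
    #   total_area = sum(p.area for p in polygons)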
svg = parse(filename)
elt_structs = {k: [] for k in _valid_nodes}
elt_points = {k: [] for k in _valid_nodes}
# get the properties of all predefined elements
for elt_type, elt_prop in _predefined.items():
_build_struct(svg, elt_structs[elt_type], elt_type, elt_prop)
# build all shapes
polygons = []
for elt_type, instructions in elt_structs.items():
for struct in instructions:
polygon, points = _make_polygon(
elt_type, struct, parent=parent, return_points=True)
polygons.append(polygon)
elt_points[elt_type].append(points)
if return_points:
return polygons, elt_points
return polygons
# ----- #
# Tools #
# ----- #
def _get_closed_subpaths(path):
'''
Generates all closed subpaths raises error if open subpaths exist.
Credit to @tatarize:
https://github.com/regebro/svg.path/issues/54#issuecomment-570101018
'''
segments = None
for p in path:
if isinstance(p, Move):
if segments is not None:
raise RuntimeError("Only closed shapes accepted.")
segments = []
segments.append(p)
if isinstance(p, Close):
yield Path(*segments)
segments = None
def _get_points(path, interpolate_curve=50):
''' Get points from path. '''
points = []
for item in path:
if isinstance(item, (Arc, CubicBezier, QuadraticBezier)):
istart = 1. / interpolate_curve
for frac in np.linspace(istart, 1, interpolate_curve):
points.append(
(item.point(frac).real, item.point(frac).imag))
else:
points.append((item.start.real, item.start.imag))
return points
def _get_outer_shell(paths_points):
''' Returns the index of the container subpath '''
minx, maxx, miny, maxy = np.inf, -np.inf, np.inf, -np.inf
container_idx = None
for i, pp in enumerate(paths_points):
arr = np.array(pp)
x_min = np.min(arr[:, 0])
x_max = np.max(arr[:, 0])
y_min = np.min(arr[:, 1])
y_max = np.max(arr[:, 1])
winner = True
if x_min <= minx:
minx = x_min
else:
winner = False
if x_max >= maxx:
maxx = x_max
else:
winner = False
if y_min <= miny:
miny = y_min
else:
winner = False
if y_max >= maxy:
maxy = y_max
else:
winner = False
if winner:
container_idx = i
return container_idx
def _build_struct(svg, container, elt_type, elt_properties):
root = svg.documentElement
for elt in root.getElementsByTagName(elt_type):
struct = {
"transf": [],
"transfdata": []
}
parent = elt.parentNode
while parent is not None:
_get_transform(parent, struct)
parent = parent.parentNode
_get_transform(elt, struct)
if elt_type == 'path':
path, trans = elt.getAttribute('d'), None
struct["path"] = path
else:
for item in elt_properties:
struct[item] = float(elt.getAttribute(item))
container.append(struct)
def _make_polygon(elt_type, instructions, parent=None, interpolate_curve=50,
return_points=False):
container = None
shell = [] # outer points defining the polygon's outer shell
holes = [] # inner points defining holes
idx_start = 0
if elt_type == "path": # build polygons from custom paths
path_data = parse_path(instructions["path"])
subpaths = [subpath for subpath in
_get_closed_subpaths(path_data)]
points = [_get_points(subpath) for subpath in subpaths]
# get the container
idx_container = _get_outer_shell(points)
shell = np.array(points[idx_container])
# get the holes and make the shape
        holes = [pp for i, pp in enumerate(points) if i != idx_container]
container = Polygon(shell, holes=holes)
elif elt_type == "ellipse": # build ellipses
circle = Point((instructions["cx"], instructions["cy"])).buffer(1)
rx, ry = instructions["rx"], instructions["ry"]
container = scale(circle, rx, ry)
elif elt_type == "circle": # build circles
r = instructions["r"]
container = Point((instructions["cx"], instructions["cy"])).buffer(r)
elif elt_type == "rect": # build rectangles
x, y = instructions["x"], instructions["y"]
w, h = instructions["width"], instructions["height"]
shell = np.array([(x, y), (x + w, y), (x + w, y + h), (x, y + h)])
container = Polygon(shell)
else:
raise RuntimeError("Unexpected element type: '{}'.".format(elt_type))
# transforms
nn, dd = instructions["transf"][::-1], instructions["transfdata"][::-1]
for name, data in zip(nn, dd):
if name == "matrix":
container = affine_transform(container, data)
elif name == "translate":
container = translate(container, *data)
# y axis is inverted in SVG, so make mirror transform
container = affine_transform(container, (1, 0, 0, -1, 0, 0))
shell = np.array(container.exterior.coords)
if return_points:
return container, shell
return container
def _get_transform(obj, tdict):
''' Get the transformation properties and name into `tdict` '''
try:
if obj.hasAttribute("transform"):
trans = obj.getAttribute('transform')
if trans.startswith("translate"):
start = trans.find("(") + 1
stop = trans.find(")")
tdict["transf"].append("translate")
tdict["transfdata"].append(
[float(f) for f in trans[start:stop].split(",")])
elif trans.startswith("matrix"):
start = trans.find("(") + 1
stop = trans.find(")")
trans = [float(f)
for f in trans[start:stop].split(",")]
tdict["transf"].append("matrix")
tdict["transfdata"].append(trans)
else:
raise RuntimeError("Uknown transform: " + trans)
except:
pass | PypiClean |
/InvokeAI-3.1.0-py3-none-any.whl/invokeai/backend/model_management/models/__init__.py | import inspect
from enum import Enum
from pydantic import BaseModel
from typing import Literal, get_origin
from .base import ( # noqa: F401
BaseModelType,
ModelType,
SubModelType,
ModelBase,
ModelConfigBase,
ModelVariantType,
SchedulerPredictionType,
ModelError,
SilenceWarnings,
ModelNotFoundException,
InvalidModelException,
DuplicateModelException,
)
from .stable_diffusion import StableDiffusion1Model, StableDiffusion2Model
from .sdxl import StableDiffusionXLModel
from .vae import VaeModel
from .lora import LoRAModel
from .controlnet import ControlNetModel # TODO:
from .textual_inversion import TextualInversionModel
from .stable_diffusion_onnx import ONNXStableDiffusion1Model, ONNXStableDiffusion2Model
MODEL_CLASSES = {
BaseModelType.StableDiffusion1: {
ModelType.ONNX: ONNXStableDiffusion1Model,
ModelType.Main: StableDiffusion1Model,
ModelType.Vae: VaeModel,
ModelType.Lora: LoRAModel,
ModelType.ControlNet: ControlNetModel,
ModelType.TextualInversion: TextualInversionModel,
},
BaseModelType.StableDiffusion2: {
ModelType.ONNX: ONNXStableDiffusion2Model,
ModelType.Main: StableDiffusion2Model,
ModelType.Vae: VaeModel,
ModelType.Lora: LoRAModel,
ModelType.ControlNet: ControlNetModel,
ModelType.TextualInversion: TextualInversionModel,
},
BaseModelType.StableDiffusionXL: {
ModelType.Main: StableDiffusionXLModel,
ModelType.Vae: VaeModel,
# will not work until support written
ModelType.Lora: LoRAModel,
ModelType.ControlNet: ControlNetModel,
ModelType.TextualInversion: TextualInversionModel,
ModelType.ONNX: ONNXStableDiffusion2Model,
},
BaseModelType.StableDiffusionXLRefiner: {
ModelType.Main: StableDiffusionXLModel,
ModelType.Vae: VaeModel,
# will not work until support written
ModelType.Lora: LoRAModel,
ModelType.ControlNet: ControlNetModel,
ModelType.TextualInversion: TextualInversionModel,
ModelType.ONNX: ONNXStableDiffusion2Model,
},
# BaseModelType.Kandinsky2_1: {
# ModelType.Main: Kandinsky2_1Model,
# ModelType.MoVQ: MoVQModel,
# ModelType.Lora: LoRAModel,
# ModelType.ControlNet: ControlNetModel,
# ModelType.TextualInversion: TextualInversionModel,
# },
}
MODEL_CONFIGS = list()
OPENAPI_MODEL_CONFIGS = list()
class OpenAPIModelInfoBase(BaseModel):
model_name: str
base_model: BaseModelType
model_type: ModelType
for base_model, models in MODEL_CLASSES.items():
for model_type, model_class in models.items():
model_configs = set(model_class._get_configs().values())
model_configs.discard(None)
MODEL_CONFIGS.extend(model_configs)
# LS: sort to get the checkpoint configs first, which makes
# for a better template in the Swagger docs
for cfg in sorted(model_configs, key=lambda x: str(x)):
model_name, cfg_name = cfg.__qualname__.split(".")[-2:]
openapi_cfg_name = model_name + cfg_name
if openapi_cfg_name in vars():
continue
api_wrapper = type(
openapi_cfg_name,
(cfg, OpenAPIModelInfoBase),
dict(
__annotations__=dict(
model_type=Literal[model_type.value],
),
),
)
# globals()[openapi_cfg_name] = api_wrapper
vars()[openapi_cfg_name] = api_wrapper
OPENAPI_MODEL_CONFIGS.append(api_wrapper)
def get_model_config_enums():
enums = list()
for model_config in MODEL_CONFIGS:
if hasattr(inspect, "get_annotations"):
fields = inspect.get_annotations(model_config)
else:
fields = model_config.__annotations__
try:
field = fields["model_format"]
except Exception:
raise Exception("format field not found")
# model_format: None
# model_format: SomeModelFormat
# model_format: Literal[SomeModelFormat.Diffusers]
# model_format: Literal[SomeModelFormat.Diffusers, SomeModelFormat.Checkpoint]
if isinstance(field, type) and issubclass(field, str) and issubclass(field, Enum):
enums.append(field)
elif get_origin(field) is Literal and all(
isinstance(arg, str) and isinstance(arg, Enum) for arg in field.__args__
):
enums.append(type(field.__args__[0]))
elif field is None:
pass
else:
raise Exception(f"Unsupported format definition in {model_configs.__qualname__}")
return enums | PypiClean |
/JumpScale-core-6.0.0.tar.gz/JumpScale-core-6.0.0/lib/JumpScale/baselib/jpackages/JPackageClient.py | import math
from JumpScale import j
from JPackageObject import JPackageObject
from Domain import Domain
try:
import JumpScale.baselib.circus
except:
pass
try:
import JumpScale.baselib.expect
except:
pass
from JumpScale.baselib import platforms
class JPackageClient():
sourcesFile = None
"""
methods to deal with jpackages, seen from client level
@qlocation j.packages
"""
def __init__(self):
"""
"""
j.system.fs.createDir(j.system.fs.joinPaths(j.dirs.packageDir, "metadata"))
j.system.fs.createDir(j.system.fs.joinPaths(j.dirs.packageDir, "files"))
j.system.fs.createDir(j.system.fs.joinPaths(j.dirs.packageDir, "metatars"))
self.domains=[]
self._metadatadirTmp=j.system.fs.joinPaths(j.dirs.varDir,"tmp","jpackages","md")
j.system.fs.createDir(self._metadatadirTmp)
# can't ask username here
# because jumpscale is not interactive yet
# So we ask the username/passwd lazy in the domain object
# j.packages.markConfigurationPending=self._runPendingReconfigeFiles
self.reloadconfig()
self.enableConsoleLogging()
self.logenable=True
self.loglevel=5
self.errors=[]
def reportError(self,msg):
self.errors.append(msg)
def log(self,msg,category="",level=5):
if level<self.loglevel+1 and self.logenable:
j.logger.log(msg,category="jpackage.%s"%category,level=level)
def enableConsoleLogging(self):
j.logger.consoleloglevel=6
j.logger.consolelogCategories.append("jpackage")
j.logger.consolelogCategories.append("blobstor")
def getJPackageMetadataScanner(self):
"""
returns tool which can be used to scan the jpackages repo's and manipulate them
"""
from core.jpackages.JPackageMetadataScanner import JPackageMetadataScanner
return JPackageMetadataScanner()
def _renew(self):
j.packages = JPackageClient()
def checkProtectedDirs(self,redo=True,checkInteractive=True):
"""
recreate the config file for protected dirs (means directories linked to code repo's)
by executing this command you are sure that no development data will be overwritten
@param redo means, restart from existing links in qbase, do not use the config file
@checkInteractive if False, will not ask just execute on it
"""
result,llist=j.system.process.execute("find /opt/qbase5 -type l")
lines=[item for item in llist.split("\n") if item.strip()<>""]
if len(lines)>0:
cfgpath=j.system.fs.joinPaths(j.dirs.cfgDir,"debug","protecteddirs","protected.cfg")
if redo==False and j.system.fs.exists(cfgpath):
llist=j.system.fs.fileGetContents(cfgpath)
lines.extend([item for item in llist.split("\n") if item.strip()<>""])
prev=""
lines2=[]
lines.sort()
for line in lines:
if line<>prev:
lines2.append(line)
prev=line
out="\n".join(lines2)
do=False
if checkInteractive:
if j.console.askYesNo("Do you want to make sure that existing linked dirs are not overwritten by installer? \n(if yes the linked dirs will be put in protected dir configuration)\n"):
do=True
else:
do=True
if do:
j.system.fs.writeFile(cfgpath,out)
def reloadconfig(self):
"""
Reload all jpackages config data from disk
"""
cfgpath=j.system.fs.joinPaths(j.dirs.cfgDir, 'jpackages', 'sources.cfg')
if not j.system.fs.exists(cfgpath):
#check if there is old jpackages dir
cfgpathOld=j.system.fs.joinPaths(j.dirs.cfgDir, 'jpackages', 'sources.cfg')
if j.system.fs.exists(cfgpathOld):
j.system.fs.renameDir(j.system.fs.joinPaths(j.dirs.cfgDir, 'jpackages'),j.system.fs.joinPaths(j.dirs.cfgDir, 'jpackages'))
if not j.system.fs.exists(cfgpath):
j.system.fs.createDir(j.system.fs.getDirName(cfgpath))
else:
cfg = j.tools.inifile.open(cfgpath)
self.sourcesConfig=cfg
domainDict = dict()
for domains in self.domains:
domainDict[domains.domainname] = domains
for domain in cfg.getSections():
if domain in domainDict.keys():
self.domains.remove(domainDict[domain])
self.domains.append(Domain(domainname=domain))
def create(self, domain="", name="", version="1.0", description="", supportedPlatforms=None):
"""
        Creates a new jpackage; this includes all standard tasklets, a config file and a description.wiki file
@param domain: string - The domain the new jpackages should reside in
@param name: string - The name of the new jpackages
@param version: string - The version of the new jpackages
@param description: string - The description of the new jpackages (is stored in the description.wiki file)
@param supportedPlatforms ["linux",...] other examples win,win32,linux64 see j.system.platformtype
"""
if j.application.shellconfig.interactive:
domain = j.console.askChoice(j.packages.getDomainNames(), "Please select a domain")
j.packages.getDomainObject(domain)._ensureDomainCanBeUpdated() #@question what does this do?
name = j.console.askString("Please provide a name")
version = j.console.askString("Please provide a version","1.0")
descr = j.console.askString("Please provide a description","")
while not supportedPlatforms:
supportedPlatforms = j.console.askChoiceMultiple(sorted(j.system.platformtype.getPlatforms()), 'Please enumerate the supported platforms')
if domain=="" or name=="":
raise RuntimeError("domain or name at least needs to be specified")
supportedPlatforms=[str(item) for item in supportedPlatforms]
# Create one in the repo
if not domain in j.packages.getDomainNames():
raise RuntimeError('Provided domain is nonexistent on this system')
if self.getDomainObject(domain).metadataFromTgz:
raise RuntimeError('The meta data for domain ' + domain + ' is coming from a tgz, you cannot create new packages in it.')
jp = JPackageObject(domain, name, version)
#jp.prepareForUpdatingFiles(suppressErrors=True)
jp.supportedPlatforms = supportedPlatforms
jp.description=description
jp.save()
j.system.fs.createDir(jp.getPathFiles())
j.system.fs.createDir(j.system.fs.joinPaths(jp.getPathFiles(),"generic"))
for pl in supportedPlatforms:
j.system.fs.createDir(j.system.fs.joinPaths(jp.getPathFiles(),"%s"%pl))
return jp
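
    # Hedged usage sketch (the domain/name/version values below are illustrative, not real packages):
    #   jp = j.packages.create(domain="mydomain", name="mypackage", version="1.0",
    #                          description="demo package", supportedPlatforms=["linux"])
    #   # saves the metadata, creates the files/generic and files/linux dirs and returns the JPackageObject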
############################################################
################## GET FUNCTIONS #########################
############################################################
def get(self, domain, name, version):
"""
Returns a jpackages
@param domain: string - The domain the jpackages is part from
@param name: string - The name of the jpackages
@param version: string - The version of the jpackages
"""
# return a package from the default repo
key = '%s%s%s' % (domain,name,version)
if self._getcache.has_key(key):
return self._getcache[key]
if self.exists(domain,name,version)==False:
raise RuntimeError("Could not find package %s." % self.getMetadataPath(domain,name,version))
self._getcache[key]=JPackageObject(domain, name, version)
return self._getcache[key]
def exists(self,domain,name,version):
"""
Checks whether the jpackages's metadata path is currently present on your system
"""
return j.system.fs.exists(self.getMetadataPath(domain,name,version))
def getInstalledPackages(self):
"""
Returns a list of all currently installed packages on your system
"""
return [p for p in self.getJPackageObjects(j.system.platformtype.myplatform) if p.isInstalled()]
def getDebugPackages(self):
"""
Returns a list of all currently installed packages on your system
"""
return [p for p in self.getJPackageObjects(j.system.platformtype.myplatform) if int(p.state.debugMode)==1]
def getPackagesWithBrokenDependencies(self):
"""
Returns a list of all jpackages which have dependencies that cannot be resolved
"""
return [package for package in self.getJPackageObjects() if len(package.getBrokenDependencies()) > 0]
def getPendingReconfigurationPackages(self):
"""
Returns a List of all jpackages that are pending for configuration
"""
return filter(lambda jpackages: jpackages.isPendingReconfiguration(), self.getJPackageObjects())
#############################################################
###################### DOMAINS ############################
#############################################################
def getDomainObject(self,domain,qualityLevel=None):
"""
Get provided domain as an object
"""
if qualityLevel==None:
for item in self.domains:
if item.domainname.lower()==domain.lower().strip():
return item
else:
return Domain(domain,qualityLevel)
raise RuntimeError("Could not find jpackages domain %s" % domain)
def getDomainNames(self):
"""
Returns a list of all domains present in the sources.cfg file
"""
result=[]
for item in self.domains:
result.append(item.domainname)
return result
############################################################
################### GET PATH FUNCTIONS ###################
############################################################
def getJPActionsPath(self,domain,name,version,fromtmp=False):
"""
Returns the metadatapath for the provided jpackages
if fromtmp is True, then tmp directorypath will be returned
@param domain: string - The domain of the jpackages
@param name: string - The name of the jpackages
@param version: string - The version of the jpackages
@param fromtmp: boolean
"""
if fromtmp:
self._metadatadirTmp
return j.system.fs.joinPaths(self._metadatadirTmp,domain,name,version,"actions")
else:
return j.system.fs.joinPaths(j.dirs.packageDir, "active", domain,name,version,"actions")
def getJPActiveHRDPath(self,domain,name,version,fromtmp=False):
"""
Returns the metadatapath for the provided jpackages
if fromtmp is True, then tmp directorypath will be returned
@param domain: string - The domain of the jpackages
@param name: string - The name of the jpackages
@param version: string - The version of the jpackages
@param fromtmp: boolean
"""
if fromtmp:
self._metadatadirTmp
return j.system.fs.joinPaths(self._metadatadirTmp,domain,name,version,"hrd")
else:
return j.system.fs.joinPaths(j.dirs.packageDir, "active", domain,name,version,"hrd")
def getMetadataPath(self,domain,name,version):
"""
Returns the metadatapath for the provided jpackages for active state
@param domain: string - The domain of the jpackages
@param name: string - The name of the jpackages
@param version: string - The version of the jpackages
@param fromtmp: boolean
"""
return j.system.fs.joinPaths(j.dirs.packageDir, "metadata", domain,name,version)
def getDataPath(self,domain,name,version):
"""
Returns the filesdatapath for the provided jpackages
@param domain: string - The domain of the jpackages
@param name: string - The name of the jpackages
@param version: string - The version of the jpackages
"""
return j.system.fs.joinPaths(j.dirs.packageDir, "files", domain,name,version)
def getMetaTarPath(self, domainName):
"""
Returns the metatarsdatapath for the provided domain
"""
return j.system.fs.joinPaths(j.dirs.packageDir, "metatars", domainName)
############################################################
###################### CACHING ###########################
############################################################
_getcache = {}
def _deleteFromCache(self, domain, name, version):
#called by a package when we call delete on it so it can be garbage collected
key = '%s%s%s' % (domain, name, version)
        self._getcache.pop(key, None)
############################################################
########################## FIND ##########################
############################################################
def findNewest(self, domain="",name="", minversion="",maxversion="",platform=None, returnNoneIfNotFound=False):
"""
Find the newest jpackages which matches the criteria
If more than 1 jpackages matches -> error
If no jpackages match and not returnNoneIfNotFound -> error
@param name: string - The name of jpackages you are looking for
@param domain: string - The domain of the jpackages you are looking for
@param minversion: string - The minimum version the jpackages must have
@param maxversion: string - The maximum version the jpackages can have
@param platform: string - Which platform the jpackages must run on
@param returnNoneIfNotFound: boolean - if true, will return None object if no jpackages have been found
"""
results=self.find(domain=domain,name=name)
# results=[]
# for item in results0:
# if item.supportsPlatform(platform=None):
# results.append(item)
namefound=""
domainfound=""
if minversion=="":
minversion="0"
if maxversion=="" or maxversion=="0":
maxversion="100.100.100"
#look for duplicates
for jp in results:
if namefound=="":
namefound=jp.name
if domainfound=="":
domainfound=jp.domain
if jp.domain<>domainfound or jp.name<>namefound:
packagesStr="\n"
for jp2 in results:
                    packagesStr+=" %s\n" % str(jp2)
raise RuntimeError("Found more than 1 jpackages matching the criteria.\n %s" % packagesStr)
#check for version match
if len(results)==0:
if returnNoneIfNotFound:
return None
raise RuntimeError("Did not find jpackages with criteria domain:%s, name:%s, platform:%s (independant from version)" % (domain,name,platform))
# filter packages so they are between min and max version bounds
result=[jp for jp in results if self._getVersionAsInt(minversion)<=self._getVersionAsInt(jp.version)<=self._getVersionAsInt(maxversion)]
result.sort(lambda jp1, jp2: - int(self._getVersionAsInt(jp1.version) - self._getVersionAsInt(jp2.version)))
if not result:
if returnNoneIfNotFound:
return None
raise RuntimeError("Did not find jpackages with criteria domain:%s, name:%s, minversion:%s, maxversion:%s, platform:%s" % (domain,name,minversion,maxversion,platform))
return result[0]
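
    # Hedged usage sketch (values are illustrative):
    #   jp = j.packages.findNewest(domain="mydomain", name="mypackage", minversion="1.0", maxversion="2.0")
    #   # raises if the matches span more than one domain/name; otherwise returns the highest version in range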
def findByName(self,name):
'''
        name can be a partial jpackage name; if nothing is found return None, if more than 1 package is found raise an error
'''
if name.find("*")==-1:
name+="*"
return self.find(name=name,domain="")
def find(self, domain=None,name=None , version="", platform=None,onlyone=False,installed=None):
"""
        @param domain: if None, will ask for the domain
"""
if domain==None:
domains=j.console.askChoiceMultiple(j.packages.getDomainNames())
result=[]
for domain in domains:
result+=self.find(domain=domain,name=name , version=version, platform=platform,onlyone=onlyone,installed=installed)
return result
if name==None:
name = j.console.askString("Please provide the name or part of the name of the package to search for (e.g *extension* -> lots of extensions)")
res = self._find(domain=domain, name=name, version=version)
if not res:
j.console.echo('No packages found, did you forget to run jpackage_update?')
if installed==True:
res=[item for item in res if item.isInstalled()]
if onlyone:
if len(res) > 1:
res = [j.console.askChoice(res, "Multiple packages found, please choose one")]
return res
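
    # Hedged usage sketch (values are illustrative); a '*' in a name makes the match partial:
    #   j.packages.find(domain="mydomain", name="*extension*")                      # every package whose name contains 'extension'
    #   j.packages.find(domain="mydomain", name="mypackage", onlyone=True, installed=True)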
def _find(self, domain="",name="", version=""):
"""
Tries to find a package based on the provided criteria
You may also use a wildcard to provide the name or domain (*partofname*)
@param domain: string - The name of jpackages domain, when using * means partial name
@param name: string - The name of the jpackages you are looking for
@param version: string - The version of the jpackages you are looking for
"""
j.logger.log("Find jpackages domain:%s name:%s version:%s" %(domain,name,version))
#work with some functional methods works faster than doing the check everytime
def findPartial(pattern,text):
pattern=pattern.replace("*","")
if text.lower().find(pattern.lower().strip())<>-1:
return True
return False
def findFull(pattern,text):
return pattern.strip().lower()==text.strip().lower()
def alwaysReturnTrue(pattern,text):
return True
domainFindMethod=alwaysReturnTrue
nameFindMethod=alwaysReturnTrue
versionFindMethod=alwaysReturnTrue
if domain:
if domain.find("*")<>-1:
domainFindMethod=findPartial
else:
domainFindMethod=findFull
if name:
if name.find("*")<>-1:
nameFindMethod=findPartial
else:
nameFindMethod=findFull
if version:
if version.find("*")<>-1:
versionFindMethod=findPartial
else:
versionFindMethod=findFull
result=[]
for p_domain, p_name, p_version in self._getJPackageTuples():
# print (p_domain, p_name, p_version)
if domainFindMethod(domain,p_domain) and nameFindMethod(name,p_name) and versionFindMethod(version,p_version):
result.append([p_domain, p_name, p_version])
result2=[]
for item in result:
result2.append(self.get(item[0],item[1], item[2]))
return result2
# Used in getJPackageObjects and that is use in find
def _getJPackageTuples(self):
res = list()
domains=self.getDomainNames()
for domainName in domains:
domainpath=j.system.fs.joinPaths(j.dirs.packageDir, "metadata", domainName)
if j.system.fs.exists(domainpath): #this follows the link
packages= [p for p in j.system.fs.listDirsInDir(domainpath,dirNameOnly=True) if p != '.hg'] # skip hg file
for packagename in packages:
packagepath=j.system.fs.joinPaths(domainpath,packagename)
versions=j.system.fs.listDirsInDir(packagepath,dirNameOnly=True)
for version in versions:
res.append([domainName,packagename,version])
return res
def getJPackageObjects(self, platform=None, domain=None):
"""
Returns a list of jpackages objects for specified platform & domain
"""
packageObjects = [self.get(*p) for p in self._getJPackageTuples()]
if platform==None:
return [p for p in packageObjects if (domain == None or p.domain == domain)]
def hasPlatform(package):
return any([supported in j.system.platformtype.getParents(platform) for supported in package.supportedPlatforms])
return [p for p in packageObjects if hasPlatform(p) and (domain == None or p.domain == domain)]
def getPackagesWithBrokenDependencies(self):
return [p for p in j.packages.find('*') if len(p.getBrokenDependencies()) > 0]
############################################################
################# UPDATE / PUBLISH #######################
############################################################
def init(self):
pass
def updateAll(self):
'''
Updates all installed jpackages to the latest builds.
        The latest meta information is retrieved from the repository and, based on this information,
        the installed packages whose buildnr is outdated are reinstalled, thus updating them to the latest build.
'''
# update all meta information:
self.updateMetaData()
# iterate over all install packages and install them
# only when they are outdated will they truly install
for p in self.getInstalledPackages():
p.install()
def updateMetaDataAll(self,force=False):
"""
Updates the metadata information of all jpackages
This used to be called updateJPackage list
        @param force: if True then local changes, if any, will be lost
"""
self.updateMetaData("",force)
def mergeMetaDataAll(self,):
"""
Tries to merge the metadata information of all jpackages with info on remote repo.
This used to be called updateJPackage list
"""
j.packages.mergeMetaData("")
def updateMetaDataForDomain(self,domainName=""):
"""
Updates the meta information of specific domain
This used to be called updateJPackage list
"""
if domainName=="":
domainName = j.console.askChoice(j.packages.getDomainNames(), "Please choose a domain")
j.packages.getDomainObject(domainName).updateMetadata("")
def linkMetaData(self,domain=""):
"""
        Links the meta information repo for each domain
"""
self.resetState()
if domain<>"":
j.logger.log("link metadata information for jpackages domain %s" % domain, 1)
d=self.getDomainObject(domain)
d.linkMetadata()
else:
domainnames=self.getDomainNames()
for domainName in domainnames:
self.linkMetaData(domainName)
def updateMetaData(self,domain="",force=False):
"""
Does an update of the meta information repo for each domain
"""
# self.resetState()
if domain<>"":
j.logger.log("Update metadata information for jpackages domain %s" % domain, 1)
d=self.getDomainObject(domain)
d.updateMetadata(force=force)
else:
domainnames=self.getDomainNames()
for domainName in domainnames:
self.updateMetaData(domainName, force=force)
def mergeMetaData(self,domain="", commitMessage=''):
"""
Does an update of the meta information repo for each domain
"""
if not j.application.shellconfig.interactive:
if commitMessage == '':
raise RuntimeError('Need commit message')
if domain<>"":
j.logger.log("Merge metadata information for jpackages domain %s" % domain, 1)
d=self.getDomainObject(domain)
d.mergeMetadata(commitMessage=commitMessage)
else:
for domainName in self.getDomainNames():
self.mergeMetaData(domainName, commitMessage=commitMessage)
def _getQualityLevels(self,domain):
cfg=self.sourcesConfig
bitbucketreponame=cfg.getValue( domain, 'bitbucketreponame')
bitbucketaccount=cfg.getValue( domain, 'bitbucketaccount')
qualityLevels=j.system.fs.listDirsInDir(j.system.fs.joinPaths(j.dirs.codeDir,bitbucketaccount,bitbucketreponame),dirNameOnly=True)
qualityLevels=[item for item in qualityLevels if item<>".hg"]
return qualityLevels
def _getMetadataDir(self,domain,qualityLevel=None,descr=""):
cfg=self.sourcesConfig
bitbucketreponame=cfg.getValue( domain, 'bitbucketreponame')
bitbucketaccount=cfg.getValue( domain, 'bitbucketaccount')
if descr=="":
descr="please select your qualitylevel"
if qualityLevel==None or qualityLevel=="":
qualityLevel=j.console.askChoice(self._getQualityLevels(domain),descr)
return j.system.fs.joinPaths(j.dirs.codeDir,bitbucketaccount,bitbucketreponame,qualityLevel)
def metadataDeleteQualityLevel(self, domain="",qualityLevel=None):
"""
Delete a quality level
"""
if domain<>"":
j.logger.log("Delete quality level %s for %s." % (qualityLevel,domain), 1)
metadataPath=self._getMetadataDir(domain,qualityLevel)
j.system.fs.removeDirTree(metadataPath)
else:
if j.application.shellconfig.interactive:
domainnames=j.console.askChoiceMultiple(j.packages.getDomainNames())
else:
domainnames=self.getDomainNames()
for domainName in domainnames:
self.metadataDeleteQualityLevel(domainName,qualityLevel)
def metadataCreateQualityLevel(self, domain="",qualityLevelFrom=None,qualityLevelTo=None,force=False,link=True):
"""
Create a quality level starting from the qualitylevelFrom e.g. unstable to beta
@param link if True will link the jpackages otherwise copy
@param force, will delete the destination
"""
if domain<>"":
j.logger.log("Create quality level for %s from %s to %s" % (domain,qualityLevelFrom,qualityLevelTo), 1)
metadataFrom=self._getMetadataDir(domain,qualityLevelFrom,"please select your qualitylevel where you want to copy from for domain %s." % domain)
if qualityLevelTo==None or qualityLevelTo=="":
qualityLevelTo=j.console.askString("Please specify qualitylevel you would like to create for domain %s" % domain)
metadataTo=self._getMetadataDir(domain,qualityLevelTo)
dirsfrom=j.system.fs.listDirsInDir(metadataFrom)
if j.system.fs.exists(metadataTo):
if force or j.console.askYesNo("metadata dir %s exists, ok to remove?" % metadataTo):
j.system.fs.removeDirTree(metadataTo)
else:
raise RuntimeError("Cannot continue to create metadata for new qualitylevel, because dest dir exists")
j.system.fs.createDir(metadataTo)
for item in dirsfrom:
while j.system.fs.isLink(item):
#look for source of link
item=j.system.fs.readlink(item)
dirname=j.system.fs.getDirName( item+"/", lastOnly=True)
if link:
j.system.fs.symlink( item,j.system.fs.joinPaths(metadataTo,dirname),overwriteTarget=True)
else:
j.system.fs.copyDirTree(item, j.system.fs.joinPaths(metadataTo,dirname), keepsymlinks=False, eraseDestination=True)
else:
if j.application.shellconfig.interactive:
domainnames=j.console.askChoiceMultiple(j.packages.getDomainNames())
else:
domainnames=self.getDomainNames()
for domainName in domainnames:
self.metadataCreateQualityLevel(domainName,qualityLevelFrom,qualityLevelTo,force,link)
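
    # Hedged usage sketch (the quality level names are illustrative):
    #   j.packages.metadataCreateQualityLevel(domain="mydomain", qualityLevelFrom="unstable",
    #                                         qualityLevelTo="beta", force=True, link=True)
    #   # links (or copies, when link=False) every package dir of the 'unstable' metadata tree into a new 'beta' tree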
def publishMetaDataAsTarGz(self, domain="",qualityLevel=None):
"""
        Compresses the meta data of a domain into a tar and uploads that tar to the bundleUpload server.
        After this, clients that obtain their metadata as a tar can download the latest metadata.
"""
        if domain=="":
            domains=j.console.askChoiceMultiple(j.packages.getDomainNames(), "Please select a domain")
        else:
            domains=[domain]
        if len(domains)>1:
            for domain in domains:
                self.publishMetaDataAsTarGz(domain=domain,qualityLevel=qualityLevel)
        else:
            domain=domains[0]
j.logger.log("Push metadata information for jpackages domain %s to reposerver." % domain, 1)
if qualityLevel=="all":
for ql in self._getQualityLevels(domain):
d = self.getDomainObject(domain,qualityLevel=ql)
d.publishMetaDataAsTarGz()
else:
d = self.getDomainObject(domain,qualityLevel=qualityLevel)
d.publishMetaDataAsTarGz()
def publish(self, commitMessage,domain=""):
"""
Publishes all domains' bundles & metadata (if no domain specified)
@param commitMessage: string - The commit message you want to assign to the publish
"""
if domain=="":
for domain in j.packages.getDomainNames():
self.publish( commitMessage=commitMessage,domain=domain)
else:
domainobject=j.packages.getDomainObject(domain)
domainobject.publish(commitMessage=commitMessage)
def publishAll(self, commitMessage=None):
"""
Publish metadata & bundles for all domains, for more informartion see publishDomain
"""
if not commitMessage:
commitMessage = j.console.askString('please enter a commit message')
for domain in j.packages.getDomainNames():
self.publishDomain(domain, commitMessage=commitMessage)
def publishDomain(self, domain="", commitMessage=None):
"""
Publish metadata & bundles for a domain.
To publish a domain means to make your local changes to the corresponding domain available to other users.
A domain can be changed in the following ways: a new package is created in it, a package in it is modified, a package in it is deleted.
        To make the changes available to others, the new metadata is uploaded to the mercurial servers and, for the packages whose
        files have been modified, new bundles are created and uploaded to the blobstor server
"""
if domain=="":
domain=j.console.askChoice(j.packages.getDomainNames(), "Please select a domain")
self.getDomainObject(domain)._ensureDomainCanBeUpdated()
self.getDomainObject(domain).publish(commitMessage=commitMessage)
##########################################################
#################### RECONFIGURE #######################
##########################################################
def _setHasPackagesPendingConfiguration(self, value=True):
file = j.system.fs.joinPaths(j.dirs.baseDir, 'cfg', 'jpackages', 'reconfigure.cfg')
if not j.system.fs.exists(file):
ini_file = j.tools.inifile.new(file)
else:
ini_file = j.tools.inifile.open(file)
if not ini_file.checkSection('main'):
ini_file.addSection('main')
ini_file.setParam("main","hasPackagesPendingConfiguration", "1" if value else "0")
ini_file.write()
def _hasPackagesPendingConfiguration(self):
file = j.system.fs.joinPaths(j.dirs.baseDir, 'cfg', 'jpackages', 'reconfigure.cfg')
if not j.system.fs.exists(file):
return False
ini_file = j.tools.inifile.open(file)
if ini_file.checkSection('main'):
return ini_file.getValue("main","hasPackagesPendingConfiguration") == '1'
return False
def runConfigurationPending(self):
if not self._hasPackagesPendingConfiguration():
return
# Get all packages that need reconfiguring and reconfigure them
# We store the state to reconfigure them in their state files
configuredPackages = set()
currentPlatform = PlatformType.findPlatformType()
def configure(package):
# If already processed return
if package in configuredPackages:
return True
configuredPackages.add(package)
# first make sure depending packages are configured
for dp in package.getDependencies(recursive=False, platform=currentPlatform):
if not configure(dp):
return False
# now configure the package
if package.isPendingReconfiguration():
j.logger.log("jpackages %s %s %s needs reconfiguration" % (package.domain,package.name,package.version),3)
try:
package.configure()
except:
j.debugging.printTraceBack('Got error while reconfiguring ' + str(package))
if j.console.askChoice(['Skip this one', 'Go to shell'], 'What do you want to do?') == 'Skip this one':
return True
else:
return False
return True
pendingPackages = self.getPendingReconfigurationPackages()
hasPendingConfiguration = False
for p in pendingPackages:
if not configure(p):
hasPendingConfiguration = True
break
self._setHasPackagesPendingConfiguration(hasPendingConfiguration)
############################################################
################ SUPPORTING FUNCTIONS ####################
############################################################
def _getVersionAsInt(self,version):
"""
@param version is string
"""
if version.find(",")<>-1:
raise RuntimeError("version string can only contain numbers and . e.g. 1.1.1")
if version=="":
version="0"
if version.find(".")<>-1:
versions=version.split(".")
else:
versions=[version]
if len(versions)>4:
raise RuntimeError("max level of versionlevels = 4 e.g. max 1.1.1.1")
#make sure always 4 levels of versions for comparison
while(len(versions)<4):
versions.append("0")
result=0
for counter in range(0,len(versions)):
level=len(versions)-counter-1
if versions[counter]=="":
versions[counter]="0"
result=int(result+(math.pow(1000,level)*int(versions[counter].strip())))
return result
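
    # Worked example (a sketch of how the version string maps onto a sortable integer;
    # each of the up to 4 dot-separated levels gets a factor of 1000):
    #   "1.2.3"   -> 1*1000**3 + 2*1000**2 + 3*1000 + 0 = 1002003000
    #   "1.2.3.4" -> 1*1000**3 + 2*1000**2 + 3*1000 + 4 = 1002003004
    #   ""        -> treated as "0"                      -> 0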
def pm_getJPackageConfig(self, jpackagesMDPath):
return JPackageConfig(jpackagesMDPath)
def makeDependencyGraph(self):
'''
        Creates a graphical visualization of all dependencies between the JPackages of all domains.
        This helps to quickly view and debug the dependencies and avoid errors.
        The target audience are the developers across groups and domains that depend on each other's packages.
        The graph can be found here:
        /opt/qbase5/var/jpackages/metadata/dependencyGraph.png
        Notes:
        The graph omits the constraints, such as version numbers and platform.
        For completeness, a second graph is created that shows packages without any dependencies (both ways).
See: dependencyGraph_singleNodes.png
'''
from pygraphviz import AGraph #import only here to avoid overhead
def _getPackageTagName(obj, separator=' - '):
n = obj.name
#n += '\\n'
#n += obj.domain
return n
j.console.echo("Making Dependency graph ... please wait.")
platform = PlatformType.getByName('generic')
g=AGraph(strict=True,directed=True, compound=True)
g.graph_attr['rankdir']='LR'
g.graph_attr['ratio']=1.3
#Generate the graph
for pack in j.packages.getJPackageObjects():
dn= 'cluster_'+pack.domain
s= g.add_subgraph(name = dn)
s.add_node(_getPackageTagName(pack))
x=g.get_node(_getPackageTagName(pack))
x.attr['label']=_getPackageTagName(pack)
depList= pack.getDependencies(platform, recursive=False)
for dep in depList:
g.add_node(_getPackageTagName(dep))
g.add_edge(_getPackageTagName(pack),_getPackageTagName(dep))
#Separate nodes with and without links
singleNodes=[]
linkedNodes=[]
for n in g.nodes():
c=[]
c=g.neighbors(n)
if c==[]:
singleNodes.append(n)
else:
linkedNodes.append(n)
#Add the domain name to the graph
for pack in j.packages.getJPackageObjects():
n=pack.domain
dn= 'cluster_'+pack.domain
s= g.add_subgraph(name=dn)
s.add_node(n)
x=g.get_node(n)
x.attr['label']=n
x.attr['style']='filled'
x.attr['shape']='box'
#Create a second version, for the graph of single nodes
stemp=g.to_string()
s=AGraph(stemp)
for n in singleNodes:
g.delete_node(n)
for n in linkedNodes:
s.delete_node(n)
g.layout(prog='dot')
graphPath = j.system.fs.joinPaths(j.dirs.packageDir, 'metadata','dependencyGraph.png')
g.draw(graphPath)
s.layout(prog='dot')
graphPath = j.system.fs.joinPaths(j.dirs.packageDir, 'metadata','dependencyGraph_singleNodes.png')
s.draw(graphPath)
j.console.echo("Dependency graph successfully created. Open file at /opt/qbase5/var/jpackages/metadata/dependencyGraph.png") | PypiClean |
/Newsroom-1.0-py3-none-any.whl/newsroom/static/dist/navigations_js.9448ce7dce72c6bc137f.js
webpackJsonp([7],{
/***/ 1:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.notify = exports.now = undefined;
exports.createStore = createStore;
exports.render = render;
exports.gettext = gettext;
exports.getProductQuery = getProductQuery;
exports.shortDate = shortDate;
exports.getDateInputDate = getDateInputDate;
exports.getLocaleDate = getLocaleDate;
exports.isInPast = isInPast;
exports.fullDate = fullDate;
exports.formatTime = formatTime;
exports.formatDate = formatDate;
exports.getTextFromHtml = getTextFromHtml;
exports.wordCount = wordCount;
exports.toggleValue = toggleValue;
exports.updateRouteParams = updateRouteParams;
exports.formatHTML = formatHTML;
exports.initWebSocket = initWebSocket;
exports.errorHandler = errorHandler;
exports.getConfig = getConfig;
exports.getTimezoneOffset = getTimezoneOffset;
exports.isTouchDevice = isTouchDevice;
var _react = __webpack_require__(0);
var _react2 = _interopRequireDefault(_react);
var _lodash = __webpack_require__(7);
var _reactRedux = __webpack_require__(6);
var _redux = __webpack_require__(43);
var _reduxLogger = __webpack_require__(47);
var _reduxThunk = __webpack_require__(48);
var _reduxThunk2 = _interopRequireDefault(_reduxThunk);
var _reactDom = __webpack_require__(25);
var _alertifyjs = __webpack_require__(49);
var _alertifyjs2 = _interopRequireDefault(_alertifyjs);
var _moment = __webpack_require__(3);
var _moment2 = _interopRequireDefault(_moment);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
var now = exports.now = (0, _moment2.default)(); // to enable mocking in tests
var TIME_FORMAT = getConfig('time_format');
var DATE_FORMAT = getConfig('date_format');
var DATETIME_FORMAT = TIME_FORMAT + ' ' + DATE_FORMAT;
/**
* Create redux store with default middleware
*
* @param {func} reducer
* @return {Store}
*/
function createStore(reducer) {
var logger = (0, _reduxLogger.createLogger)({
duration: true,
collapsed: true,
timestamp: false
});
return (0, _redux.createStore)(reducer, (0, _redux.applyMiddleware)(_reduxThunk2.default, logger));
}
/**
* Render helper
*
* @param {Store} store
* @param {Component} App
* @param {Element} element
*/
function render(store, App, element) {
return (0, _reactDom.render)(_react2.default.createElement(
_reactRedux.Provider,
{ store: store },
_react2.default.createElement(App, null)
), element);
}
/**
 * Noop for now, but it's better to use it from the beginning.
*
* It handles interpolation:
*
* gettext('Hello {{ name }}', {name: 'John'});
*
* @param {String} text
* @param {Object} params
* @return {String}
*/
function gettext(text, params) {
var translated = text; // temporary
if (params) {
Object.keys(params).forEach(function (param) {
var paramRegexp = new RegExp('{{ ?' + param + ' ?}}', 'g');
translated = translated.replace(paramRegexp, params[param] || '');
});
}
return translated;
}
/**
* Returns query string query for a given product
*
* @param {Object} product
* @return {string}
*/
function getProductQuery(product) {
var q = product.sd_product_id ? 'products.code:' + product.sd_product_id : '';
q += product.query ? product.sd_product_id ? ' OR (' + product.query + ')' : product.query : '';
return q;
}
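
// Hedged example (field values are illustrative): for a product like
//   {sd_product_id: '123', query: 'sport'}
// getProductQuery returns 'products.code:123 OR (sport)';
// with only a query it returns 'sport', and with only an sd_product_id it returns 'products.code:123'.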
/**
* Parse given date string and return Date instance
*
* @param {String} dateString
* @return {Date}
*/
function parseDate(dateString) {
return (0, _moment2.default)(dateString);
}
/**
* Return date formatted for lists
*
* @param {String} dateString
* @return {String}
*/
function shortDate(dateString) {
var parsed = parseDate(dateString);
return parsed.format(isToday(parsed) ? TIME_FORMAT : DATE_FORMAT);
}
/**
* Return date formatted for date inputs
*
* @param {String} dateString
* @return {String}
*/
function getDateInputDate(dateString) {
if (dateString) {
var parsed = parseDate(dateString);
return parsed.format('YYYY-MM-DD');
}
return '';
}
/**
* Return locale date
*
* @param {String} dateString
* @return {String}
*/
function getLocaleDate(dateString) {
return parseDate(dateString).format(DATETIME_FORMAT);
}
/**
* Test if given day is today
*
* @param {Date} date
* @return {Boolean}
*/
function isToday(date) {
return date.format('YYYY-MM-DD') === now.format('YYYY-MM-DD');
}
/**
* Test if given day is in the past
*
* @param {Date} date
* @return {Boolean}
*/
function isInPast(dateString) {
if (!dateString) {
return false;
}
var parsed = parseDate(dateString);
return parsed.format('YYYY-MM-DD') < now.format('YYYY-MM-DD');
}
/**
* Return full date representation
*
* @param {String} dateString
* @return {String}
*/
function fullDate(dateString) {
return parseDate(dateString).format(DATETIME_FORMAT);
}
/**
* Format time of a date
*
* @param {String} dateString
* @return {String}
*/
function formatTime(dateString) {
return parseDate(dateString).format(TIME_FORMAT);
}
/**
* Format date of a date (without time)
*
* @param {String} dateString
* @return {String}
*/
function formatDate(dateString) {
return parseDate(dateString).format(DATE_FORMAT);
}
/**
* Wrapper for alertifyjs
*/
var notify = exports.notify = {
success: function success(message) {
return _alertifyjs2.default.success(message);
},
error: function error(message) {
return _alertifyjs2.default.error(message);
}
};
/**
* Get text from html
*
* @param {string} html
* @return {string}
*/
function getTextFromHtml(html) {
var div = document.createElement('div');
div.innerHTML = formatHTML(html);
var tree = document.createTreeWalker(div, NodeFilter.SHOW_TEXT, null, false); // ie requires all params
var text = [];
while (tree.nextNode()) {
text.push(tree.currentNode.textContent);
if (tree.currentNode.nextSibling) {
switch (tree.currentNode.nextSibling.nodeName) {
case 'BR':
case 'HR':
text.push('\n');
}
continue;
}
switch (tree.currentNode.parentNode.nodeName) {
case 'P':
case 'LI':
case 'H1':
case 'H2':
case 'H3':
case 'H4':
case 'H5':
case 'DIV':
case 'TABLE':
case 'BLOCKQUOTE':
text.push('\n');
}
}
return text.join('');
}
/**
* Get word count for given item
*
* @param {Object} item
* @return {number}
*/
function wordCount(item) {
if ((0, _lodash.isInteger)(item.wordcount)) {
return item.wordcount;
}
if (!item.body_html) {
return 0;
}
var text = getTextFromHtml(item.body_html);
return text.split(' ').filter(function (x) {
return x.trim();
}).length || 0;
}
/**
* Toggle value within array
*
* returns a new array so can be used with setState
*
* @param {Array} items
* @param {mixed} value
* @return {Array}
*/
function toggleValue(items, value) {
if (!items) {
return [value];
}
var without = items.filter(function (x) {
return value !== x;
});
return without.length === items.length ? without.concat([value]) : without;
}
function updateRouteParams(updates, state) {
var params = new URLSearchParams(window.location.search);
var dirty = false;
Object.keys(updates).forEach(function (key) {
if (updates[key]) {
dirty = dirty || updates[key] !== params.get(key);
params.set(key, updates[key]);
} else {
dirty = dirty || params.has(key) || params.entries.length == 0;
params.delete(key);
}
});
if (dirty) {
history.pushState(state, null, '?' + params.toString());
}
}
var SHIFT_OUT_REGEXP = new RegExp(String.fromCharCode(14), 'g');
/**
* Replace some white characters in html
*
* @param {String} html
* @return {String}
*/
function formatHTML(html) {
return html.replace(SHIFT_OUT_REGEXP, html.indexOf('<pre>') === -1 ? '<br>' : '\n');
}
/**
* Initializes the web socket listener
* @param store
*/
function initWebSocket(store, action) {
if (window.newsroom) {
var ws = new WebSocket(window.newsroom.websocket);
ws.onmessage = function (message) {
var data = JSON.parse(message.data);
if (data.event) {
store.dispatch(action(data));
}
};
}
}
/**
* Generic error handler for http requests
* @param error
* @param dispatch
* @param setError
*/
function errorHandler(error, dispatch, setError) {
console.error('error', error);
if (error.response.status !== 400) {
notify.error(error.response.statusText);
return;
}
if (setError) {
error.response.json().then(function (data) {
dispatch(setError(data));
});
}
}
/**
* Get config value
*
* @param {String} key
* @param {Mixed} defaultValue
* @return {Mixed}
*/
function getConfig(key, defaultValue) {
return (0, _lodash.get)(window.newsroom, key, defaultValue);
}
function getTimezoneOffset() {
    return now.utcOffset() ? now.utcOffset() * -1 : 0; // it's opposite to Date.getTimezoneOffset
}
function isTouchDevice() {
return 'ontouchstart' in window // works on most browsers
|| navigator.maxTouchPoints; // works on IE10/11 and Surface
}
/***/ }),
/***/ 100:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _react = __webpack_require__(0);
var _react2 = _interopRequireDefault(_react);
var _propTypes = __webpack_require__(2);
var _propTypes2 = _interopRequireDefault(_propTypes);
var _utils = __webpack_require__(1);
var _lodash = __webpack_require__(7);
var _CheckboxInput = __webpack_require__(27);
var _CheckboxInput2 = _interopRequireDefault(_CheckboxInput);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var EditPanel = function (_React$Component) {
_inherits(EditPanel, _React$Component);
function EditPanel(props) {
_classCallCheck(this, EditPanel);
var _this = _possibleConstructorReturn(this, (EditPanel.__proto__ || Object.getPrototypeOf(EditPanel)).call(this, props));
_this.onItemChange = _this.onItemChange.bind(_this);
_this.saveItems = _this.saveItems.bind(_this);
_this.initItems = _this.initItems.bind(_this);
_this.state = { activeParent: props.parent._id, items: {} };
return _this;
}
_createClass(EditPanel, [{
key: 'onItemChange',
value: function onItemChange(event) {
var item = event.target.name;
var items = Object.assign({}, this.state.items);
items[item] = !items[item];
this.setState({ items: items });
}
}, {
key: 'saveItems',
value: function saveItems(event) {
event.preventDefault();
this.props.onSave(Object.keys((0, _lodash.pickBy)(this.state.items)));
}
}, {
key: 'initItems',
value: function initItems(props) {
var items = {};
props.items.map(function (item) {
return items[item._id] = (props.parent[props.field] || []).includes(item._id);
});
this.setState({ activeParent: props.parent._id, items: items });
}
}, {
key: 'componentWillMount',
value: function componentWillMount() {
this.initItems(this.props);
}
}, {
key: 'componentWillReceiveProps',
value: function componentWillReceiveProps(nextProps) {
if (this.state.activeParent !== nextProps.parent._id) {
this.initItems(nextProps);
}
}
}, {
key: 'render',
value: function render() {
var _this2 = this;
return _react2.default.createElement(
'div',
{ className: 'tab-pane active', id: 'navigations' },
_react2.default.createElement(
'form',
{ onSubmit: this.saveItems },
_react2.default.createElement(
'div',
{ className: 'list-item__preview-form' },
_react2.default.createElement(
'ul',
{ className: 'list-unstyled' },
this.props.items.map(function (item) {
return _react2.default.createElement(
'li',
{ key: item._id },
_react2.default.createElement(_CheckboxInput2.default, {
name: item._id,
label: item.name,
value: !!_this2.state.items[item._id],
onChange: _this2.onItemChange })
);
})
)
),
_react2.default.createElement(
'div',
{ className: 'list-item__preview-footer' },
_react2.default.createElement('input', {
type: 'submit',
className: 'btn btn-outline-primary',
value: (0, _utils.gettext)('Save')
})
)
)
);
}
}]);
return EditPanel;
}(_react2.default.Component);
EditPanel.propTypes = {
parent: _propTypes2.default.object.isRequired,
items: _propTypes2.default.arrayOf(_propTypes2.default.object),
field: _propTypes2.default.string,
onSave: _propTypes2.default.func.isRequired
};
exports.default = EditPanel;
/***/ }),
/***/ 11:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.DISPLAY_ABSTRACT = undefined;
exports.getReadItems = getReadItems;
exports.markItemAsRead = markItemAsRead;
exports.getNewsOnlyParam = getNewsOnlyParam;
exports.toggleNewsOnlyParam = toggleNewsOnlyParam;
exports.getActiveFilterTab = getActiveFilterTab;
exports.setActiveFilterTab = setActiveFilterTab;
exports.getMaxVersion = getMaxVersion;
exports.getIntVersion = getIntVersion;
exports.getPicture = getPicture;
exports.getThumbnailRendition = getThumbnailRendition;
exports.getPreviewRendition = getPreviewRendition;
exports.getDetailRendition = getDetailRendition;
exports.isKilled = isKilled;
exports.isPreformatted = isPreformatted;
exports.showItemVersions = showItemVersions;
exports.shortText = shortText;
exports.getCaption = getCaption;
exports.getActiveQuery = getActiveQuery;
exports.isTopicActive = isTopicActive;
exports.isEqualItem = isEqualItem;
var _store = __webpack_require__(31);
var _store2 = _interopRequireDefault(_store);
var _localStorage = __webpack_require__(18);
var _localStorage2 = _interopRequireDefault(_localStorage);
var _operations = __webpack_require__(41);
var _operations2 = _interopRequireDefault(_operations);
var _lodash = __webpack_require__(7);
var _utils = __webpack_require__(1);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }
var STATUS_KILLED = 'canceled';
var READ_ITEMS_STORE = 'read_items';
var NEWS_ONLY_STORE = 'news_only';
var FILTER_TAB = 'filter_tab';
var DISPLAY_ABSTRACT = exports.DISPLAY_ABSTRACT = (0, _utils.getConfig)('display_abstract');
var store = _store2.default.createStore([_localStorage2.default], [_operations2.default]);
/**
* Get read items
*
* @returns {Object}
*/
function getReadItems() {
return store.get(READ_ITEMS_STORE);
}
/**
* Marks the given item as read
*
* @param {Object} item
* @param {Object} state
*/
function markItemAsRead(item, state) {
if (item && item._id && item.version) {
var readItems = (0, _lodash.get)(state, 'readItems', getReadItems()) || {};
store.assign(READ_ITEMS_STORE, _defineProperty({}, item._id, getMaxVersion(readItems[item._id], item.version)));
}
}
/**
* Get news only value
*
* @returns {boolean}
*/
function getNewsOnlyParam() {
return !!(store.get(NEWS_ONLY_STORE) || {}).value;
}
/**
* Toggles news only value
*
*/
function toggleNewsOnlyParam() {
store.assign(NEWS_ONLY_STORE, { value: !getNewsOnlyParam() });
}
/**
* Get active filter tab
*
* @returns {boolean}
*/
function getActiveFilterTab() {
return (store.get(FILTER_TAB) || {}).value;
}
/**
* Set active filter tab
*
*/
function setActiveFilterTab(tab) {
store.assign(FILTER_TAB, { value: tab });
}
/**
* Returns the greater version
*
* @param versionA
* @param versionB
* @returns {number}
*/
function getMaxVersion(versionA, versionB) {
return Math.max(parseInt(versionA, 10) || 0, parseInt(versionB, 10) || 0);
}
/**
* Returns the item version as integer
*
* @param {Object} item
* @returns {number}
*/
function getIntVersion(item) {
if (item) {
return parseInt(item.version, 10) || 0;
}
}
/**
* Get picture for an item
*
* if item is picture return it, otherwise look for featuremedia
*
* @param {Object} item
* @return {Object}
*/
function getPicture(item) {
return item.type === 'picture' ? item : (0, _lodash.get)(item, 'associations.featuremedia', getBodyPicture(item));
}
function getBodyPicture(item) {
var pictures = Object.values((0, _lodash.get)(item, 'associations', {})).filter(function (assoc) {
return (0, _lodash.get)(assoc, 'type') === 'picture';
});
return pictures.length ? pictures[0] : null;
}
/**
* Get picture thumbnail rendition specs
*
* @param {Object} picture
* @param {Boolean} large
* @return {Object}
*/
function getThumbnailRendition(picture, large) {
var rendition = large ? 'renditions._newsroom_thumbnail_large' : 'renditions._newsroom_thumbnail';
return (0, _lodash.get)(picture, rendition, (0, _lodash.get)(picture, 'renditions.thumbnail'));
}
/**
* Get picture preview rendition
*
* @param {Object} picture
* @return {Object}
*/
function getPreviewRendition(picture) {
return (0, _lodash.get)(picture, 'renditions._newsroom_view', (0, _lodash.get)(picture, 'renditions.viewImage'));
}
/**
* Get picture detail rendition
*
* @param {Object} picture
* @return {Object}
*/
function getDetailRendition(picture) {
return (0, _lodash.get)(picture, 'renditions._newsroom_base', (0, _lodash.get)(picture, 'renditions.baseImage'));
}
/**
* Test if an item is killed
*
* @param {Object} item
* @return {Boolean}
*/
function isKilled(item) {
return item.pubstatus === STATUS_KILLED;
}
/**
* Checks if item is preformatted
*
* @param {Object} item
* @return {Boolean}
*/
function isPreformatted(item) {
return item.body_html.includes('<pre>');
}
/**
* Test if other item versions should be visible
*
* @param {Object} item
* @param {bool} next toggle if checking for next or previous versions
* @return {Boolean}
*/
function showItemVersions(item, next) {
return !isKilled(item) && (next || item.ancestors && item.ancestors.length);
}
/**
* Get short text for lists
*
* @param {Item} item
* @return {Node}
*/
function shortText(item) {
var length = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 40;
var html = item.description_html || item.body_html || '<p></p>';
var text = item.description_text || (0, _utils.getTextFromHtml)(html);
var words = text.split(/\s/).filter(function (w) {
return w;
});
return words.slice(0, length).join(' ') + (words.length > length ? '...' : '');
}
/**
* Get caption for picture
*
* @param {Object} picture
* @return {String}
*/
function getCaption(picture) {
return (0, _utils.getTextFromHtml)(picture.body_text || picture.description_text || '').trim();
}
function getActiveQuery(query, activeFilter, createdFilter) {
var queryParams = {
query: query || null,
filter: (0, _lodash.pickBy)(activeFilter),
created: (0, _lodash.pickBy)(createdFilter)
};
return (0, _lodash.pickBy)(queryParams, function (val) {
return !(0, _lodash.isEmpty)(val);
});
}
function isTopicActive(topic, activeQuery) {
var topicQuery = getActiveQuery(topic.query, topic.filter, topic.created);
return !(0, _lodash.isEmpty)(activeQuery) && (0, _lodash.isEqual)(topicQuery, activeQuery);
}
/**
* Test if 2 items are equal
*
* @param {Object} a
* @param {Object} b
* @return {Boolean}
*/
function isEqualItem(a, b) {
return a && b && a._id === b._id && a.version === b.version;
}
/***/ }),
/***/ 125:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.SET_ERROR = exports.GET_PRODUCTS = exports.GET_NAVIGATIONS = exports.QUERY_NAVIGATIONS = exports.SET_QUERY = exports.CANCEL_EDIT = exports.NEW_NAVIGATION = exports.EDIT_NAVIGATION = exports.SELECT_NAVIGATION = undefined;
exports.selectNavigation = selectNavigation;
exports.editNavigation = editNavigation;
exports.newNavigation = newNavigation;
exports.cancelEdit = cancelEdit;
exports.setQuery = setQuery;
exports.queryNavigations = queryNavigations;
exports.getNavigations = getNavigations;
exports.getProducts = getProducts;
exports.setError = setError;
exports.fetchNavigations = fetchNavigations;
exports.postNavigation = postNavigation;
exports.deleteNavigation = deleteNavigation;
exports.fetchProducts = fetchProducts;
exports.saveProducts = saveProducts;
exports.initViewData = initViewData;
var _utils = __webpack_require__(1);
var _server = __webpack_require__(15);
var _server2 = _interopRequireDefault(_server);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
var SELECT_NAVIGATION = exports.SELECT_NAVIGATION = 'SELECT_NAVIGATION';
function selectNavigation(id) {
return { type: SELECT_NAVIGATION, id: id };
}
var EDIT_NAVIGATION = exports.EDIT_NAVIGATION = 'EDIT_NAVIGATION';
function editNavigation(event) {
return { type: EDIT_NAVIGATION, event: event };
}
var NEW_NAVIGATION = exports.NEW_NAVIGATION = 'NEW_NAVIGATION';
function newNavigation() {
return { type: NEW_NAVIGATION };
}
var CANCEL_EDIT = exports.CANCEL_EDIT = 'CANCEL_EDIT';
function cancelEdit(event) {
return { type: CANCEL_EDIT, event: event };
}
var SET_QUERY = exports.SET_QUERY = 'SET_QUERY';
function setQuery(query) {
return { type: SET_QUERY, query: query };
}
var QUERY_NAVIGATIONS = exports.QUERY_NAVIGATIONS = 'QUERY_NAVIGATIONS';
function queryNavigations() {
return { type: QUERY_NAVIGATIONS };
}
var GET_NAVIGATIONS = exports.GET_NAVIGATIONS = 'GET_NAVIGATIONS';
function getNavigations(data) {
return { type: GET_NAVIGATIONS, data: data };
}
var GET_PRODUCTS = exports.GET_PRODUCTS = 'GET_PRODUCTS';
function getProducts(data) {
return { type: GET_PRODUCTS, data: data };
}
var SET_ERROR = exports.SET_ERROR = 'SET_ERROR';
function setError(errors) {
return { type: SET_ERROR, errors: errors };
}
/**
* Fetches navigations
*
*/
function fetchNavigations() {
return function (dispatch, getState) {
dispatch(queryNavigations());
var query = getState().query || '';
return _server2.default.get('/navigations/search?q=' + query).then(function (data) {
return dispatch(getNavigations(data));
}).catch(function (error) {
return (0, _utils.errorHandler)(error, dispatch, setError);
});
};
}
/**
* Creates new navigations
*
*/
function postNavigation() {
return function (dispatch, getState) {
var navigation = getState().navigationToEdit;
var url = '/navigations/' + (navigation._id ? navigation._id : 'new');
return _server2.default.post(url, navigation).then(function () {
if (navigation._id) {
_utils.notify.success((0, _utils.gettext)('Navigation updated successfully'));
} else {
_utils.notify.success((0, _utils.gettext)('Navigation created successfully'));
}
dispatch(fetchNavigations());
}).catch(function (error) {
return (0, _utils.errorHandler)(error, dispatch, setError);
});
};
}
/**
* Deletes a navigation
*
*/
function deleteNavigation() {
return function (dispatch, getState) {
var navigation = getState().navigationToEdit;
var url = '/navigations/' + navigation._id;
return _server2.default.del(url).then(function () {
_utils.notify.success((0, _utils.gettext)('Navigation deleted successfully'));
dispatch(fetchNavigations());
}).catch(function (error) {
return (0, _utils.errorHandler)(error, dispatch, setError);
});
};
}
/**
* Fetches products
*
*/
function fetchProducts() {
return function (dispatch) {
return _server2.default.get('/products/search').then(function (data) {
dispatch(getProducts(data));
}).catch(function (error) {
return (0, _utils.errorHandler)(error, dispatch, setError);
});
};
}
/**
* Saves products for a navigation
*
*/
function saveProducts(products) {
return function (dispatch, getState) {
var navigation = getState().navigationToEdit;
return _server2.default.post('/navigations/' + navigation._id + '/products', { products: products }).then(function () {
_utils.notify.success((0, _utils.gettext)('Navigation updated successfully'));
dispatch(fetchProducts());
}).catch(function (error) {
return (0, _utils.errorHandler)(error, dispatch, setError);
});
};
}
function initViewData(data) {
return function (dispatch) {
dispatch(getNavigations(data.navigations));
dispatch(getProducts(data.products));
};
}
/***/ }),
/***/ 15:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
var defaultOptions = {
credentials: 'same-origin'
};
function options() {
var custom = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};
return Object.assign({}, defaultOptions, custom);
}
function checkStatus(response) {
if (response.status >= 200 && response.status < 300) {
return response.json();
} else {
var error = new Error(response.statusText);
error.response = response;
throw error;
}
}
var Server = function () {
function Server() {
_classCallCheck(this, Server);
}
_createClass(Server, [{
key: 'get',
/**
* Make GET request
*
* @param {String} url
* @return {Promise}
*/
value: function get(url) {
return fetch(url, options({})).then(checkStatus);
}
/**
* Make POST request to url
*
* @param {String} url
* @param {Object} data
* @return {Promise}
*/
}, {
key: 'post',
value: function post(url, data) {
return fetch(url, options({
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(data)
})).then(checkStatus);
}
/**
     * Make POST request to url, keeping the format of the input
*
* @param {String} url
* @param {Object} data
* @return {Promise}
*/
}, {
key: 'postFiles',
value: function postFiles(url, data) {
return fetch(url, options({
method: 'POST',
body: data
})).then(checkStatus);
}
/**
* Make DELETE request to url
*
* @param {String} url
* @return {Promise}
*/
}, {
key: 'del',
value: function del(url, data) {
return fetch(url, options({
method: 'DELETE',
headers: { 'Content-Type': 'application/json' },
body: data ? JSON.stringify(data) : null
})).then(checkStatus);
}
}]);
return Server;
}();
exports.default = new Server();
/***/ }),
/***/ 16:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.renderModal = renderModal;
exports.closeModal = closeModal;
var RENDER_MODAL = exports.RENDER_MODAL = 'RENDER_MODAL';
function renderModal(modal, data) {
return { type: RENDER_MODAL, modal: modal, data: data };
}
var CLOSE_MODAL = exports.CLOSE_MODAL = 'CLOSE_MODAL';
function closeModal() {
return { type: CLOSE_MODAL };
}
/***/ }),
/***/ 18:
/***/ (function(module, exports, __webpack_require__) {
var util = __webpack_require__(5)
var Global = util.Global
module.exports = {
name: 'localStorage',
read: read,
write: write,
each: each,
remove: remove,
clearAll: clearAll,
}
function localStorage() {
return Global.localStorage
}
function read(key) {
return localStorage().getItem(key)
}
function write(key, data) {
return localStorage().setItem(key, data)
}
function each(fn) {
for (var i = localStorage().length - 1; i >= 0; i--) {
var key = localStorage().key(i)
fn(read(key), key)
}
}
function remove(key) {
return localStorage().removeItem(key)
}
function clearAll() {
return localStorage().clear()
}
/***/ }),
/***/ 26:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
var _react = __webpack_require__(0);
var _react2 = _interopRequireDefault(_react);
var _propTypes = __webpack_require__(2);
var _propTypes2 = _interopRequireDefault(_propTypes);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function TextInput(_ref) {
var type = _ref.type,
name = _ref.name,
label = _ref.label,
onChange = _ref.onChange,
value = _ref.value,
error = _ref.error,
required = _ref.required,
readOnly = _ref.readOnly,
maxLength = _ref.maxLength;
var wrapperClass = 'form-group';
if (error && error.length > 0) {
wrapperClass += ' has-error';
}
if (!name) {
name = 'input-' + label;
}
return _react2.default.createElement(
'div',
{ className: wrapperClass },
_react2.default.createElement(
'label',
{ htmlFor: name },
label
),
_react2.default.createElement(
'div',
{ className: 'field' },
_react2.default.createElement('input', {
type: type || 'text',
id: name,
name: name,
className: 'form-control',
value: value,
onChange: onChange,
required: required,
maxLength: maxLength,
readOnly: readOnly
}),
error && _react2.default.createElement(
'div',
{ className: 'alert alert-danger' },
error
)
)
);
}
TextInput.propTypes = {
type: _propTypes2.default.string,
label: _propTypes2.default.string.isRequired,
name: _propTypes2.default.string,
value: _propTypes2.default.string,
error: _propTypes2.default.arrayOf(_propTypes2.default.string),
onChange: _propTypes2.default.func,
required: _propTypes2.default.bool,
readOnly: _propTypes2.default.bool,
maxLength: _propTypes2.default.number
};
exports.default = TextInput;
/***/ }),
/***/ 27:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
var _react = __webpack_require__(0);
var _react2 = _interopRequireDefault(_react);
var _classnames = __webpack_require__(14);
var _classnames2 = _interopRequireDefault(_classnames);
var _propTypes = __webpack_require__(2);
var _propTypes2 = _interopRequireDefault(_propTypes);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function CheckboxInput(_ref) {
var name = _ref.name,
label = _ref.label,
onChange = _ref.onChange,
value = _ref.value,
labelClass = _ref.labelClass;
if (!name) {
name = 'input-' + label;
}
return _react2.default.createElement(
'div',
{ className: 'form-check p-0' },
_react2.default.createElement(
'div',
{ className: 'custom-control custom-checkbox' },
_react2.default.createElement('input', { type: 'checkbox',
name: name,
className: 'custom-control-input',
checked: value,
id: name,
onChange: onChange }),
_react2.default.createElement(
'label',
{ className: (0, _classnames2.default)('custom-control-label', labelClass), htmlFor: name },
label
)
)
);
}
CheckboxInput.propTypes = {
name: _propTypes2.default.string,
label: _propTypes2.default.string.isRequired,
onChange: _propTypes2.default.func.isRequired,
value: _propTypes2.default.bool.isRequired,
labelClass: _propTypes2.default.string
};
exports.default = CheckboxInput;
/***/ }),
/***/ 29:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _lodash = __webpack_require__(7);
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
var Analytics = function () {
function Analytics() {
_classCallCheck(this, Analytics);
}
_createClass(Analytics, [{
key: '_event',
value: function _event(name, params) {
if (window.gtag) {
var company = (0, _lodash.get)(window, 'profileData.companyName', 'none');
var user = (0, _lodash.get)(window, 'profileData.user.first_name', 'unknown');
var userParams = {
event_category: company,
company: company,
user: user
};
window.gtag('event', name, Object.assign(userParams, params));
}
}
}, {
key: 'event',
value: function event(name, label, params) {
this._event(name, Object.assign({
event_label: label
}, params));
}
}, {
key: 'itemEvent',
value: function itemEvent(name, item, params) {
this.event(name, item.headline || item.slugline, params);
}
}, {
key: 'timingComplete',
value: function timingComplete(name, value) {
this._event('timing_complete', { name: name, value: value });
}
}, {
key: 'pageview',
value: function pageview(title, path) {
if (window.gtag) {
window.gtag('config', (0, _lodash.get)(window, 'newsroom.analytics'), {
page_title: title,
page_path: path
});
}
}
}, {
key: 'itemView',
value: function itemView(item) {
if (item) {
this.pageview(item.headline || item.slugline, '/wire?item=' + item._id);
} else {
this.pageview();
}
}
}, {
key: 'sendEvents',
value: function sendEvents(events) {
var _this = this;
events.forEach(function (event) {
_this._event(event);
});
}
}]);
return Analytics;
}();
// make it available
window.analytics = new Analytics();
exports.default = window.analytics;
/***/ }),
/***/ 31:
/***/ (function(module, exports, __webpack_require__) {
var engine = __webpack_require__(32)
var storages = __webpack_require__(33)
var plugins = [__webpack_require__(39)]
module.exports = engine.createStore(storages, plugins)
/***/ }),
/***/ 32:
/***/ (function(module, exports, __webpack_require__) {
var util = __webpack_require__(5)
var slice = util.slice
var pluck = util.pluck
var each = util.each
var bind = util.bind
var create = util.create
var isList = util.isList
var isFunction = util.isFunction
var isObject = util.isObject
module.exports = {
createStore: createStore
}
var storeAPI = {
version: '2.0.12',
enabled: false,
// get returns the value of the given key. If that value
// is undefined, it returns optionalDefaultValue instead.
get: function(key, optionalDefaultValue) {
var data = this.storage.read(this._namespacePrefix + key)
return this._deserialize(data, optionalDefaultValue)
},
	// set will store the given value at key and return that value.
// Calling set with value === undefined is equivalent to calling remove.
set: function(key, value) {
if (value === undefined) {
return this.remove(key)
}
this.storage.write(this._namespacePrefix + key, this._serialize(value))
return value
},
// remove deletes the key and value stored at the given key.
remove: function(key) {
this.storage.remove(this._namespacePrefix + key)
},
// each will call the given callback once for each key-value pair
// in this store.
each: function(callback) {
var self = this
this.storage.each(function(val, namespacedKey) {
callback.call(self, self._deserialize(val), (namespacedKey || '').replace(self._namespaceRegexp, ''))
})
},
// clearAll will remove all the stored key-value pairs in this store.
clearAll: function() {
this.storage.clearAll()
},
// additional functionality that can't live in plugins
// ---------------------------------------------------
// hasNamespace returns true if this store instance has the given namespace.
hasNamespace: function(namespace) {
return (this._namespacePrefix == '__storejs_'+namespace+'_')
},
// createStore creates a store.js instance with the first
// functioning storage in the list of storage candidates,
	// and applies the given mixins to the instance.
createStore: function() {
return createStore.apply(this, arguments)
},
addPlugin: function(plugin) {
this._addPlugin(plugin)
},
namespace: function(namespace) {
return createStore(this.storage, this.plugins, namespace)
}
}
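// Illustrative sketch (added for clarity, not part of store.js): how the
// public API above is used once createStore() has produced a store instance.
// The key names are hypothetical; the function is never invoked here.
function exampleStoreApiUsage(store) {
	store.set('user', { name: 'demo' })     // value is JSON-serialized via _serialize
	var user = store.get('user', {})        // second argument is the default when the key is missing
	store.each(function(value, key) {})     // iterates every key/value pair in this namespace
	store.remove('user')                    // deletes a single key
	store.clearAll()                        // wipes the whole backing storage
	return user
}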
function _warn() {
var _console = (typeof console == 'undefined' ? null : console)
if (!_console) { return }
var fn = (_console.warn ? _console.warn : _console.log)
fn.apply(_console, arguments)
}
function createStore(storages, plugins, namespace) {
if (!namespace) {
namespace = ''
}
if (storages && !isList(storages)) {
storages = [storages]
}
if (plugins && !isList(plugins)) {
plugins = [plugins]
}
var namespacePrefix = (namespace ? '__storejs_'+namespace+'_' : '')
var namespaceRegexp = (namespace ? new RegExp('^'+namespacePrefix) : null)
var legalNamespaces = /^[a-zA-Z0-9_\-]*$/ // alpha-numeric + underscore and dash
if (!legalNamespaces.test(namespace)) {
throw new Error('store.js namespaces can only have alphanumerics + underscores and dashes')
}
var _privateStoreProps = {
_namespacePrefix: namespacePrefix,
_namespaceRegexp: namespaceRegexp,
_testStorage: function(storage) {
try {
var testStr = '__storejs__test__'
storage.write(testStr, testStr)
var ok = (storage.read(testStr) === testStr)
storage.remove(testStr)
return ok
} catch(e) {
return false
}
},
_assignPluginFnProp: function(pluginFnProp, propName) {
var oldFn = this[propName]
this[propName] = function pluginFn() {
var args = slice(arguments, 0)
var self = this
// super_fn calls the old function which was overwritten by
// this mixin.
function super_fn() {
if (!oldFn) { return }
each(arguments, function(arg, i) {
args[i] = arg
})
return oldFn.apply(self, args)
}
			// Give the mixin function access to super_fn by prefixing all mixin function
// arguments with super_fn.
var newFnArgs = [super_fn].concat(args)
return pluginFnProp.apply(self, newFnArgs)
}
},
_serialize: function(obj) {
return JSON.stringify(obj)
},
_deserialize: function(strVal, defaultVal) {
if (!strVal) { return defaultVal }
// It is possible that a raw string value has been previously stored
// in a storage without using store.js, meaning it will be a raw
// string value instead of a JSON serialized string. By defaulting
// to the raw string value in case of a JSON parse error, we allow
// for past stored values to be forwards-compatible with store.js
var val = ''
try { val = JSON.parse(strVal) }
catch(e) { val = strVal }
return (val !== undefined ? val : defaultVal)
},
_addStorage: function(storage) {
if (this.enabled) { return }
if (this._testStorage(storage)) {
this.storage = storage
this.enabled = true
}
},
_addPlugin: function(plugin) {
var self = this
// If the plugin is an array, then add all plugins in the array.
// This allows for a plugin to depend on other plugins.
if (isList(plugin)) {
each(plugin, function(plugin) {
self._addPlugin(plugin)
})
return
}
// Keep track of all plugins we've seen so far, so that we
// don't add any of them twice.
var seenPlugin = pluck(this.plugins, function(seenPlugin) {
return (plugin === seenPlugin)
})
if (seenPlugin) {
return
}
this.plugins.push(plugin)
// Check that the plugin is properly formed
if (!isFunction(plugin)) {
throw new Error('Plugins must be function values that return objects')
}
var pluginProperties = plugin.call(this)
if (!isObject(pluginProperties)) {
throw new Error('Plugins must return an object of function properties')
}
// Add the plugin function properties to this store instance.
each(pluginProperties, function(pluginFnProp, propName) {
if (!isFunction(pluginFnProp)) {
throw new Error('Bad plugin property: '+propName+' from plugin '+plugin.name+'. Plugins should only return functions.')
}
self._assignPluginFnProp(pluginFnProp, propName)
})
},
		// Put deprecated properties in the private API, so as not to expose them to accidental
// discovery through inspection of the store object.
// Deprecated: addStorage
addStorage: function(storage) {
_warn('store.addStorage(storage) is deprecated. Use createStore([storages])')
this._addStorage(storage)
}
}
var store = create(_privateStoreProps, storeAPI, {
plugins: []
})
store.raw = {}
each(store, function(prop, propName) {
if (isFunction(prop)) {
store.raw[propName] = bind(store, prop)
}
})
each(storages, function(storage) {
store._addStorage(storage)
})
each(plugins, function(plugin) {
store._addPlugin(plugin)
})
return store
}
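// Illustrative sketch (added for clarity, not part of store.js): a minimal
// plugin. A plugin is a function returning an object of methods; each method
// receives super_fn (the implementation it overrides) as its first argument,
// as wired up by _assignPluginFnProp above. Never invoked here.
function exampleLoggingPlugin() {
	return {
		get: function(super_fn, key) {
			var value = super_fn()          // calls the original get(key)
			_warn('store.get("' + key + '") ->', value)
			return value
		}
	}
}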
/***/ }),
/***/ 33:
/***/ (function(module, exports, __webpack_require__) {
module.exports = [
// Listed in order of usage preference
__webpack_require__(18),
__webpack_require__(34),
__webpack_require__(35),
__webpack_require__(36),
__webpack_require__(37),
__webpack_require__(38)
]
/***/ }),
/***/ 34:
/***/ (function(module, exports, __webpack_require__) {
// oldFF-globalStorage provides storage for Firefox
// versions 6 and 7, where no localStorage, etc
// is available.
var util = __webpack_require__(5)
var Global = util.Global
module.exports = {
name: 'oldFF-globalStorage',
read: read,
write: write,
each: each,
remove: remove,
clearAll: clearAll,
}
var globalStorage = Global.globalStorage
function read(key) {
return globalStorage[key]
}
function write(key, data) {
globalStorage[key] = data
}
function each(fn) {
for (var i = globalStorage.length - 1; i >= 0; i--) {
var key = globalStorage.key(i)
fn(globalStorage[key], key)
}
}
function remove(key) {
return globalStorage.removeItem(key)
}
function clearAll() {
	// each passes (value, key), so read the key from the second argument
	each(function(_, key) {
		delete globalStorage[key]
	})
}
/***/ }),
/***/ 35:
/***/ (function(module, exports, __webpack_require__) {
// oldIE-userDataStorage provides storage for Internet Explorer
// versions 6 and 7, where no localStorage, sessionStorage, etc
// is available.
var util = __webpack_require__(5)
var Global = util.Global
module.exports = {
name: 'oldIE-userDataStorage',
write: write,
read: read,
each: each,
remove: remove,
clearAll: clearAll,
}
var storageName = 'storejs'
var doc = Global.document
var _withStorageEl = _makeIEStorageElFunction()
var disable = (Global.navigator ? Global.navigator.userAgent : '').match(/ (MSIE 8|MSIE 9|MSIE 10)\./) // MSIE 8.x, 9.x and 10.x
function write(unfixedKey, data) {
if (disable) { return }
var fixedKey = fixKey(unfixedKey)
_withStorageEl(function(storageEl) {
storageEl.setAttribute(fixedKey, data)
storageEl.save(storageName)
})
}
function read(unfixedKey) {
if (disable) { return }
var fixedKey = fixKey(unfixedKey)
var res = null
_withStorageEl(function(storageEl) {
res = storageEl.getAttribute(fixedKey)
})
return res
}
function each(callback) {
_withStorageEl(function(storageEl) {
var attributes = storageEl.XMLDocument.documentElement.attributes
for (var i=attributes.length-1; i>=0; i--) {
var attr = attributes[i]
callback(storageEl.getAttribute(attr.name), attr.name)
}
})
}
function remove(unfixedKey) {
var fixedKey = fixKey(unfixedKey)
_withStorageEl(function(storageEl) {
storageEl.removeAttribute(fixedKey)
storageEl.save(storageName)
})
}
function clearAll() {
_withStorageEl(function(storageEl) {
var attributes = storageEl.XMLDocument.documentElement.attributes
storageEl.load(storageName)
for (var i=attributes.length-1; i>=0; i--) {
storageEl.removeAttribute(attributes[i].name)
}
storageEl.save(storageName)
})
}
// Helpers
//////////
// In IE7, keys cannot start with a digit or contain certain chars.
// See https://github.com/marcuswestin/store.js/issues/40
// See https://github.com/marcuswestin/store.js/issues/83
var forbiddenCharsRegex = new RegExp("[!\"#$%&'()*+,/\\\\:;<=>?@[\\]^`{|}~]", "g")
function fixKey(key) {
return key.replace(/^\d/, '___$&').replace(forbiddenCharsRegex, '___')
}
function _makeIEStorageElFunction() {
if (!doc || !doc.documentElement || !doc.documentElement.addBehavior) {
return null
}
var scriptTag = 'script',
storageOwner,
storageContainer,
storageEl
// Since #userData storage applies only to specific paths, we need to
// somehow link our data to a specific path. We choose /favicon.ico
// as a pretty safe option, since all browsers already make a request to
// this URL anyway and being a 404 will not hurt us here. We wrap an
// iframe pointing to the favicon in an ActiveXObject(htmlfile) object
// (see: http://msdn.microsoft.com/en-us/library/aa752574(v=VS.85).aspx)
// since the iframe access rules appear to allow direct access and
// manipulation of the document element, even for a 404 page. This
// document can be used instead of the current document (which would
// have been limited to the current path) to perform #userData storage.
try {
/* global ActiveXObject */
storageContainer = new ActiveXObject('htmlfile')
storageContainer.open()
storageContainer.write('<'+scriptTag+'>document.w=window</'+scriptTag+'><iframe src="/favicon.ico"></iframe>')
storageContainer.close()
storageOwner = storageContainer.w.frames[0].document
storageEl = storageOwner.createElement('div')
} catch(e) {
// somehow ActiveXObject instantiation failed (perhaps some special
		// security settings or otherwise); fall back to per-path storage
storageEl = doc.createElement('div')
storageOwner = doc.body
}
return function(storeFunction) {
var args = [].slice.call(arguments, 0)
args.unshift(storageEl)
// See http://msdn.microsoft.com/en-us/library/ms531081(v=VS.85).aspx
// and http://msdn.microsoft.com/en-us/library/ms531424(v=VS.85).aspx
storageOwner.appendChild(storageEl)
storageEl.addBehavior('#default#userData')
storageEl.load(storageName)
storeFunction.apply(this, args)
storageOwner.removeChild(storageEl)
return
}
}
/***/ }),
/***/ 36:
/***/ (function(module, exports, __webpack_require__) {
// cookieStorage is useful in Safari private browsing mode, where localStorage
// doesn't work but cookies do. This implementation is adapted from
// https://developer.mozilla.org/en-US/docs/Web/API/Storage/LocalStorage
var util = __webpack_require__(5)
var Global = util.Global
var trim = util.trim
module.exports = {
name: 'cookieStorage',
read: read,
write: write,
each: each,
remove: remove,
clearAll: clearAll,
}
var doc = Global.document
function read(key) {
if (!key || !_has(key)) { return null }
var regexpStr = "(?:^|.*;\\s*)" +
escape(key).replace(/[\-\.\+\*]/g, "\\$&") +
"\\s*\\=\\s*((?:[^;](?!;))*[^;]?).*"
return unescape(doc.cookie.replace(new RegExp(regexpStr), "$1"))
}
function each(callback) {
var cookies = doc.cookie.split(/; ?/g)
for (var i = cookies.length - 1; i >= 0; i--) {
if (!trim(cookies[i])) {
continue
}
var kvp = cookies[i].split('=')
var key = unescape(kvp[0])
var val = unescape(kvp[1])
callback(val, key)
}
}
function write(key, data) {
if(!key) { return }
doc.cookie = escape(key) + "=" + escape(data) + "; expires=Tue, 19 Jan 2038 03:14:07 GMT; path=/"
}
function remove(key) {
if (!key || !_has(key)) {
return
}
doc.cookie = escape(key) + "=; expires=Thu, 01 Jan 1970 00:00:00 GMT; path=/"
}
function clearAll() {
each(function(_, key) {
remove(key)
})
}
function _has(key) {
return (new RegExp("(?:^|;\\s*)" + escape(key).replace(/[\-\.\+\*]/g, "\\$&") + "\\s*\\=")).test(doc.cookie)
}
/***/ }),
/***/ 37:
/***/ (function(module, exports, __webpack_require__) {
var util = __webpack_require__(5)
var Global = util.Global
module.exports = {
name: 'sessionStorage',
read: read,
write: write,
each: each,
remove: remove,
clearAll: clearAll
}
function sessionStorage() {
return Global.sessionStorage
}
function read(key) {
return sessionStorage().getItem(key)
}
function write(key, data) {
return sessionStorage().setItem(key, data)
}
function each(fn) {
for (var i = sessionStorage().length - 1; i >= 0; i--) {
var key = sessionStorage().key(i)
fn(read(key), key)
}
}
function remove(key) {
return sessionStorage().removeItem(key)
}
function clearAll() {
return sessionStorage().clear()
}
/***/ }),
/***/ 38:
/***/ (function(module, exports) {
// memoryStorage is a useful last fallback to ensure that the store
// is functional (meaning store.get(), store.set(), etc. will all function).
// However, stored values will not persist when the browser navigates to
// a new page or reloads the current page.
module.exports = {
name: 'memoryStorage',
read: read,
write: write,
each: each,
remove: remove,
clearAll: clearAll,
}
var memoryStorage = {}
function read(key) {
return memoryStorage[key]
}
function write(key, data) {
memoryStorage[key] = data
}
function each(callback) {
for (var key in memoryStorage) {
if (memoryStorage.hasOwnProperty(key)) {
callback(memoryStorage[key], key)
}
}
}
function remove(key) {
delete memoryStorage[key]
}
function clearAll() {
memoryStorage = {}
}
/***/ }),
/***/ 39:
/***/ (function(module, exports, __webpack_require__) {
module.exports = json2Plugin
function json2Plugin() {
__webpack_require__(40)
return {}
}
/***/ }),
/***/ 40:
/***/ (function(module, exports) {
/* eslint-disable */
// json2.js
// 2016-10-28
// Public Domain.
// NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
// See http://www.JSON.org/js.html
// This code should be minified before deployment.
// See http://javascript.crockford.com/jsmin.html
// USE YOUR OWN COPY. IT IS EXTREMELY UNWISE TO LOAD CODE FROM SERVERS YOU DO
// NOT CONTROL.
// This file creates a global JSON object containing two methods: stringify
// and parse. This file provides the ES5 JSON capability to ES3 systems.
// If a project might run on IE8 or earlier, then this file should be included.
// This file does nothing on ES5 systems.
// JSON.stringify(value, replacer, space)
// value any JavaScript value, usually an object or array.
// replacer an optional parameter that determines how object
// values are stringified for objects. It can be a
// function or an array of strings.
// space an optional parameter that specifies the indentation
// of nested structures. If it is omitted, the text will
// be packed without extra whitespace. If it is a number,
// it will specify the number of spaces to indent at each
// level. If it is a string (such as "\t" or " "),
// it contains the characters used to indent at each level.
// This method produces a JSON text from a JavaScript value.
// When an object value is found, if the object contains a toJSON
// method, its toJSON method will be called and the result will be
// stringified. A toJSON method does not serialize: it returns the
// value represented by the name/value pair that should be serialized,
// or undefined if nothing should be serialized. The toJSON method
// will be passed the key associated with the value, and this will be
// bound to the value.
// For example, this would serialize Dates as ISO strings.
// Date.prototype.toJSON = function (key) {
// function f(n) {
// // Format integers to have at least two digits.
// return (n < 10)
// ? "0" + n
// : n;
// }
// return this.getUTCFullYear() + "-" +
// f(this.getUTCMonth() + 1) + "-" +
// f(this.getUTCDate()) + "T" +
// f(this.getUTCHours()) + ":" +
// f(this.getUTCMinutes()) + ":" +
// f(this.getUTCSeconds()) + "Z";
// };
// You can provide an optional replacer method. It will be passed the
// key and value of each member, with this bound to the containing
// object. The value that is returned from your method will be
// serialized. If your method returns undefined, then the member will
// be excluded from the serialization.
// If the replacer parameter is an array of strings, then it will be
// used to select the members to be serialized. It filters the results
// such that only members with keys listed in the replacer array are
// stringified.
// Values that do not have JSON representations, such as undefined or
// functions, will not be serialized. Such values in objects will be
// dropped; in arrays they will be replaced with null. You can use
// a replacer function to replace those with JSON values.
// JSON.stringify(undefined) returns undefined.
// The optional space parameter produces a stringification of the
// value that is filled with line breaks and indentation to make it
// easier to read.
// If the space parameter is a non-empty string, then that string will
// be used for indentation. If the space parameter is a number, then
// the indentation will be that many spaces.
// Example:
// text = JSON.stringify(["e", {pluribus: "unum"}]);
// // text is '["e",{"pluribus":"unum"}]'
// text = JSON.stringify(["e", {pluribus: "unum"}], null, "\t");
// // text is '[\n\t"e",\n\t{\n\t\t"pluribus": "unum"\n\t}\n]'
// text = JSON.stringify([new Date()], function (key, value) {
// return this[key] instanceof Date
// ? "Date(" + this[key] + ")"
// : value;
// });
// // text is '["Date(---current time---)"]'
// JSON.parse(text, reviver)
// This method parses a JSON text to produce an object or array.
// It can throw a SyntaxError exception.
// The optional reviver parameter is a function that can filter and
// transform the results. It receives each of the keys and values,
// and its return value is used instead of the original value.
// If it returns what it received, then the structure is not modified.
// If it returns undefined then the member is deleted.
// Example:
// // Parse the text. Values that look like ISO date strings will
// // be converted to Date objects.
// myData = JSON.parse(text, function (key, value) {
// var a;
// if (typeof value === "string") {
// a =
// /^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}(?:\.\d*)?)Z$/.exec(value);
// if (a) {
// return new Date(Date.UTC(+a[1], +a[2] - 1, +a[3], +a[4],
// +a[5], +a[6]));
// }
// }
// return value;
// });
// myData = JSON.parse('["Date(09/09/2001)"]', function (key, value) {
// var d;
// if (typeof value === "string" &&
// value.slice(0, 5) === "Date(" &&
// value.slice(-1) === ")") {
// d = new Date(value.slice(5, -1));
// if (d) {
// return d;
// }
// }
// return value;
// });
// This is a reference implementation. You are free to copy, modify, or
// redistribute.
/*jslint
eval, for, this
*/
/*property
JSON, apply, call, charCodeAt, getUTCDate, getUTCFullYear, getUTCHours,
getUTCMinutes, getUTCMonth, getUTCSeconds, hasOwnProperty, join,
lastIndex, length, parse, prototype, push, replace, slice, stringify,
test, toJSON, toString, valueOf
*/
// Create a JSON object only if one does not already exist. We create the
// methods in a closure to avoid creating global variables.
if (typeof JSON !== "object") {
JSON = {};
}
(function () {
"use strict";
var rx_one = /^[\],:{}\s]*$/;
var rx_two = /\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g;
var rx_three = /"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g;
var rx_four = /(?:^|:|,)(?:\s*\[)+/g;
var rx_escapable = /[\\"\u0000-\u001f\u007f-\u009f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g;
var rx_dangerous = /[\u0000\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g;
function f(n) {
// Format integers to have at least two digits.
return n < 10
? "0" + n
: n;
}
function this_value() {
return this.valueOf();
}
if (typeof Date.prototype.toJSON !== "function") {
Date.prototype.toJSON = function () {
return isFinite(this.valueOf())
? this.getUTCFullYear() + "-" +
f(this.getUTCMonth() + 1) + "-" +
f(this.getUTCDate()) + "T" +
f(this.getUTCHours()) + ":" +
f(this.getUTCMinutes()) + ":" +
f(this.getUTCSeconds()) + "Z"
: null;
};
Boolean.prototype.toJSON = this_value;
Number.prototype.toJSON = this_value;
String.prototype.toJSON = this_value;
}
var gap;
var indent;
var meta;
var rep;
function quote(string) {
// If the string contains no control characters, no quote characters, and no
// backslash characters, then we can safely slap some quotes around it.
// Otherwise we must also replace the offending characters with safe escape
// sequences.
rx_escapable.lastIndex = 0;
return rx_escapable.test(string)
? "\"" + string.replace(rx_escapable, function (a) {
var c = meta[a];
return typeof c === "string"
? c
: "\\u" + ("0000" + a.charCodeAt(0).toString(16)).slice(-4);
}) + "\""
: "\"" + string + "\"";
}
function str(key, holder) {
// Produce a string from holder[key].
var i; // The loop counter.
var k; // The member key.
var v; // The member value.
var length;
var mind = gap;
var partial;
var value = holder[key];
// If the value has a toJSON method, call it to obtain a replacement value.
if (value && typeof value === "object" &&
typeof value.toJSON === "function") {
value = value.toJSON(key);
}
// If we were called with a replacer function, then call the replacer to
// obtain a replacement value.
if (typeof rep === "function") {
value = rep.call(holder, key, value);
}
// What happens next depends on the value's type.
switch (typeof value) {
case "string":
return quote(value);
case "number":
// JSON numbers must be finite. Encode non-finite numbers as null.
return isFinite(value)
? String(value)
: "null";
case "boolean":
case "null":
// If the value is a boolean or null, convert it to a string. Note:
// typeof null does not produce "null". The case is included here in
// the remote chance that this gets fixed someday.
return String(value);
// If the type is "object", we might be dealing with an object or an array or
// null.
case "object":
// Due to a specification blunder in ECMAScript, typeof null is "object",
// so watch out for that case.
if (!value) {
return "null";
}
// Make an array to hold the partial results of stringifying this object value.
gap += indent;
partial = [];
// Is the value an array?
if (Object.prototype.toString.apply(value) === "[object Array]") {
// The value is an array. Stringify every element. Use null as a placeholder
// for non-JSON values.
length = value.length;
for (i = 0; i < length; i += 1) {
partial[i] = str(i, value) || "null";
}
// Join all of the elements together, separated with commas, and wrap them in
// brackets.
v = partial.length === 0
? "[]"
: gap
? "[\n" + gap + partial.join(",\n" + gap) + "\n" + mind + "]"
: "[" + partial.join(",") + "]";
gap = mind;
return v;
}
// If the replacer is an array, use it to select the members to be stringified.
if (rep && typeof rep === "object") {
length = rep.length;
for (i = 0; i < length; i += 1) {
if (typeof rep[i] === "string") {
k = rep[i];
v = str(k, value);
if (v) {
partial.push(quote(k) + (
gap
? ": "
: ":"
) + v);
}
}
}
} else {
// Otherwise, iterate through all of the keys in the object.
for (k in value) {
if (Object.prototype.hasOwnProperty.call(value, k)) {
v = str(k, value);
if (v) {
partial.push(quote(k) + (
gap
? ": "
: ":"
) + v);
}
}
}
}
// Join all of the member texts together, separated with commas,
// and wrap them in braces.
v = partial.length === 0
? "{}"
: gap
? "{\n" + gap + partial.join(",\n" + gap) + "\n" + mind + "}"
: "{" + partial.join(",") + "}";
gap = mind;
return v;
}
}
// If the JSON object does not yet have a stringify method, give it one.
if (typeof JSON.stringify !== "function") {
meta = { // table of character substitutions
"\b": "\\b",
"\t": "\\t",
"\n": "\\n",
"\f": "\\f",
"\r": "\\r",
"\"": "\\\"",
"\\": "\\\\"
};
JSON.stringify = function (value, replacer, space) {
// The stringify method takes a value and an optional replacer, and an optional
// space parameter, and returns a JSON text. The replacer can be a function
// that can replace values, or an array of strings that will select the keys.
// A default replacer method can be provided. Use of the space parameter can
// produce text that is more easily readable.
var i;
gap = "";
indent = "";
// If the space parameter is a number, make an indent string containing that
// many spaces.
if (typeof space === "number") {
for (i = 0; i < space; i += 1) {
indent += " ";
}
// If the space parameter is a string, it will be used as the indent string.
} else if (typeof space === "string") {
indent = space;
}
// If there is a replacer, it must be a function or an array.
// Otherwise, throw an error.
rep = replacer;
if (replacer && typeof replacer !== "function" &&
(typeof replacer !== "object" ||
typeof replacer.length !== "number")) {
throw new Error("JSON.stringify");
}
// Make a fake root object containing our value under the key of "".
// Return the result of stringifying the value.
return str("", {"": value});
};
}
// If the JSON object does not yet have a parse method, give it one.
if (typeof JSON.parse !== "function") {
JSON.parse = function (text, reviver) {
// The parse method takes a text and an optional reviver function, and returns
// a JavaScript value if the text is a valid JSON text.
var j;
function walk(holder, key) {
// The walk method is used to recursively walk the resulting structure so
// that modifications can be made.
var k;
var v;
var value = holder[key];
if (value && typeof value === "object") {
for (k in value) {
if (Object.prototype.hasOwnProperty.call(value, k)) {
v = walk(value, k);
if (v !== undefined) {
value[k] = v;
} else {
delete value[k];
}
}
}
}
return reviver.call(holder, key, value);
}
// Parsing happens in four stages. In the first stage, we replace certain
// Unicode characters with escape sequences. JavaScript handles many characters
// incorrectly, either silently deleting them, or treating them as line endings.
text = String(text);
rx_dangerous.lastIndex = 0;
if (rx_dangerous.test(text)) {
text = text.replace(rx_dangerous, function (a) {
return "\\u" +
("0000" + a.charCodeAt(0).toString(16)).slice(-4);
});
}
// In the second stage, we run the text against regular expressions that look
// for non-JSON patterns. We are especially concerned with "()" and "new"
// because they can cause invocation, and "=" because it can cause mutation.
// But just to be safe, we want to reject all unexpected forms.
// We split the second stage into 4 regexp operations in order to work around
// crippling inefficiencies in IE's and Safari's regexp engines. First we
// replace the JSON backslash pairs with "@" (a non-JSON character). Second, we
// replace all simple value tokens with "]" characters. Third, we delete all
// open brackets that follow a colon or comma or that begin the text. Finally,
// we look to see that the remaining characters are only whitespace or "]" or
// "," or ":" or "{" or "}". If that is so, then the text is safe for eval.
if (
rx_one.test(
text
.replace(rx_two, "@")
.replace(rx_three, "]")
.replace(rx_four, "")
)
) {
// In the third stage we use the eval function to compile the text into a
// JavaScript structure. The "{" operator is subject to a syntactic ambiguity
// in JavaScript: it can begin a block or an object literal. We wrap the text
// in parens to eliminate the ambiguity.
j = eval("(" + text + ")");
// In the optional fourth stage, we recursively walk the new structure, passing
// each name/value pair to a reviver function for possible transformation.
return (typeof reviver === "function")
? walk({"": j}, "")
: j;
}
// If the text is not JSON parseable, then a SyntaxError is thrown.
throw new SyntaxError("JSON.parse");
};
}
}());
/***/ }),
/***/ 41:
/***/ (function(module, exports, __webpack_require__) {
var util = __webpack_require__(5)
var slice = util.slice
var assign = util.assign
var updatePlugin = __webpack_require__(42)
module.exports = [updatePlugin, operationsPlugin]
function operationsPlugin() {
return {
// array
push: push,
pop: pop,
shift: shift,
unshift: unshift,
// obj
assign: assign_,
}
// array
function push(_, key, val1, val2, val3, etc) {
return _arrayOp.call(this, 'push', arguments)
}
function pop(_, key) {
return _arrayOp.call(this, 'pop', arguments)
}
function shift(_, key) {
return _arrayOp.call(this, 'shift', arguments)
}
function unshift(_, key, val1, val2, val3, etc) {
return _arrayOp.call(this, 'unshift', arguments)
}
// obj
function assign_(_, key, props1, props2, props3, etc) {
var varArgs = slice(arguments, 2)
return this.update(key, {}, function(val) {
if (typeof val != 'object') {
throw new Error('store.assign called for non-object value with key "'+key+'"')
}
varArgs.unshift(val)
return assign.apply(Object, varArgs)
})
}
// internal
///////////
function _arrayOp(arrayFn, opArgs) {
var res
var key = opArgs[1]
var rest = slice(opArgs, 2)
this.update(key, [], function(arrVal) {
res = Array.prototype[arrayFn].apply(arrVal, rest)
})
return res
}
}
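// Illustrative sketch (added for clarity, not part of the plugin): the array
// and object helpers above delegate to store.update, so each call reads the
// stored value, applies the native Array/Object operation and writes the
// result back. Key names are hypothetical; never invoked here.
function exampleOperationsUsage(store) {
	store.push('recentSearches', 'sport')        // append to a stored array
	var last = store.pop('recentSearches')       // remove and return the last entry
	store.assign('settings', { theme: 'dark' })  // shallow-merge into a stored object
	return last
}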
/***/ }),
/***/ 42:
/***/ (function(module, exports) {
module.exports = updatePlugin
function updatePlugin() {
return {
update: update
}
function update(_, key, optDefaultVal, updateFn) {
if (arguments.length == 3) {
updateFn = optDefaultVal
optDefaultVal = undefined
}
var val = this.get(key, optDefaultVal)
var retVal = updateFn(val)
this.set(key, retVal != undefined ? retVal : val)
}
}
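// Illustrative sketch (added for clarity, not part of the plugin): update()
// reads the value stored at key (falling back to the default), hands it to
// the callback and writes the result back in one step. Mutating the value in
// place works because the original value is re-saved when the callback
// returns undefined. The key name is hypothetical; never invoked here.
function exampleUpdateUsage(store) {
	store.update('counters', {}, function(counters) {
		counters.clicks = (counters.clicks || 0) + 1
	})
}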
/***/ }),
/***/ 5:
/***/ (function(module, exports, __webpack_require__) {
/* WEBPACK VAR INJECTION */(function(global) {var assign = make_assign()
var create = make_create()
var trim = make_trim()
var Global = (typeof window !== 'undefined' ? window : global)
module.exports = {
assign: assign,
create: create,
trim: trim,
bind: bind,
slice: slice,
each: each,
map: map,
pluck: pluck,
isList: isList,
isFunction: isFunction,
isObject: isObject,
Global: Global
}
function make_assign() {
if (Object.assign) {
return Object.assign
} else {
return function shimAssign(obj, props1, props2, etc) {
for (var i = 1; i < arguments.length; i++) {
each(Object(arguments[i]), function(val, key) {
obj[key] = val
})
}
return obj
}
}
}
function make_create() {
if (Object.create) {
return function create(obj, assignProps1, assignProps2, etc) {
var assignArgsList = slice(arguments, 1)
return assign.apply(this, [Object.create(obj)].concat(assignArgsList))
}
} else {
function F() {} // eslint-disable-line no-inner-declarations
return function create(obj, assignProps1, assignProps2, etc) {
var assignArgsList = slice(arguments, 1)
F.prototype = obj
return assign.apply(this, [new F()].concat(assignArgsList))
}
}
}
function make_trim() {
if (String.prototype.trim) {
return function trim(str) {
return String.prototype.trim.call(str)
}
} else {
return function trim(str) {
return str.replace(/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, '')
}
}
}
function bind(obj, fn) {
return function() {
return fn.apply(obj, Array.prototype.slice.call(arguments, 0))
}
}
function slice(arr, index) {
return Array.prototype.slice.call(arr, index || 0)
}
function each(obj, fn) {
pluck(obj, function(val, key) {
fn(val, key)
return false
})
}
function map(obj, fn) {
var res = (isList(obj) ? [] : {})
pluck(obj, function(v, k) {
res[k] = fn(v, k)
return false
})
return res
}
function pluck(obj, fn) {
if (isList(obj)) {
for (var i=0; i<obj.length; i++) {
if (fn(obj[i], i)) {
return obj[i]
}
}
} else {
for (var key in obj) {
if (obj.hasOwnProperty(key)) {
if (fn(obj[key], key)) {
return obj[key]
}
}
}
}
}
function isList(val) {
return (val != null && typeof val != 'function' && typeof val.length == 'number')
}
function isFunction(val) {
return val && {}.toString.call(val) === '[object Function]'
}
function isObject(val) {
return val && {}.toString.call(val) === '[object Object]'
}
/* WEBPACK VAR INJECTION */}.call(exports, __webpack_require__(28)))
/***/ }),
/***/ 50:
/***/ (function(module, exports) {
/*
MIT License http://www.opensource.org/licenses/mit-license.php
Author Tobias Koppers @sokra
*/
// css base code, injected by the css-loader
module.exports = function(useSourceMap) {
var list = [];
// return the list of modules as css string
list.toString = function toString() {
return this.map(function (item) {
var content = cssWithMappingToString(item, useSourceMap);
if(item[2]) {
return "@media " + item[2] + "{" + content + "}";
} else {
return content;
}
}).join("");
};
// import a list of modules into the list
list.i = function(modules, mediaQuery) {
if(typeof modules === "string")
modules = [[null, modules, ""]];
var alreadyImportedModules = {};
for(var i = 0; i < this.length; i++) {
var id = this[i][0];
if(typeof id === "number")
alreadyImportedModules[id] = true;
}
for(i = 0; i < modules.length; i++) {
var item = modules[i];
// skip already imported module
// this implementation is not 100% perfect for weird media query combinations
// when a module is imported multiple times with different media queries.
// I hope this will never occur (Hey this way we have smaller bundles)
if(typeof item[0] !== "number" || !alreadyImportedModules[item[0]]) {
if(mediaQuery && !item[2]) {
item[2] = mediaQuery;
} else if(mediaQuery) {
item[2] = "(" + item[2] + ") and (" + mediaQuery + ")";
}
list.push(item);
}
}
};
return list;
};
function cssWithMappingToString(item, useSourceMap) {
var content = item[1] || '';
var cssMapping = item[3];
if (!cssMapping) {
return content;
}
if (useSourceMap && typeof btoa === 'function') {
var sourceMapping = toComment(cssMapping);
var sourceURLs = cssMapping.sources.map(function (source) {
return '/*# sourceURL=' + cssMapping.sourceRoot + source + ' */'
});
return [content].concat(sourceURLs).concat([sourceMapping]).join('\n');
}
return [content].join('\n');
}
// Adapted from convert-source-map (MIT)
function toComment(sourceMap) {
// eslint-disable-next-line no-undef
var base64 = btoa(unescape(encodeURIComponent(JSON.stringify(sourceMap))));
var data = 'sourceMappingURL=data:application/json;charset=utf-8;base64,' + base64;
return '/*# ' + data + ' */';
}
/***/ }),
/***/ 51:
/***/ (function(module, exports, __webpack_require__) {
/*
MIT License http://www.opensource.org/licenses/mit-license.php
Author Tobias Koppers @sokra
*/
var stylesInDom = {};
var memoize = function (fn) {
var memo;
return function () {
if (typeof memo === "undefined") memo = fn.apply(this, arguments);
return memo;
};
};
var isOldIE = memoize(function () {
// Test for IE <= 9 as proposed by Browserhacks
// @see http://browserhacks.com/#hack-e71d8692f65334173fee715c222cb805
	// Testing for the existence of standard globals allows style-loader
	// to operate correctly in non-standard environments
// @see https://github.com/webpack-contrib/style-loader/issues/177
return window && document && document.all && !window.atob;
});
var getElement = (function (fn) {
var memo = {};
return function(selector) {
if (typeof memo[selector] === "undefined") {
var styleTarget = fn.call(this, selector);
// Special case to return head of iframe instead of iframe itself
if (styleTarget instanceof window.HTMLIFrameElement) {
try {
// This will throw an exception if access to iframe is blocked
// due to cross-origin restrictions
styleTarget = styleTarget.contentDocument.head;
} catch(e) {
styleTarget = null;
}
}
memo[selector] = styleTarget;
}
return memo[selector]
};
})(function (target) {
return document.querySelector(target)
});
var singleton = null;
var singletonCounter = 0;
var stylesInsertedAtTop = [];
var fixUrls = __webpack_require__(52);
module.exports = function(list, options) {
if (typeof DEBUG !== "undefined" && DEBUG) {
if (typeof document !== "object") throw new Error("The style-loader cannot be used in a non-browser environment");
}
options = options || {};
options.attrs = typeof options.attrs === "object" ? options.attrs : {};
// Force single-tag solution on IE6-9, which has a hard limit on the # of <style>
// tags it will allow on a page
if (!options.singleton && typeof options.singleton !== "boolean") options.singleton = isOldIE();
// By default, add <style> tags to the <head> element
if (!options.insertInto) options.insertInto = "head";
// By default, add <style> tags to the bottom of the target
if (!options.insertAt) options.insertAt = "bottom";
var styles = listToStyles(list, options);
addStylesToDom(styles, options);
return function update (newList) {
var mayRemove = [];
for (var i = 0; i < styles.length; i++) {
var item = styles[i];
var domStyle = stylesInDom[item.id];
domStyle.refs--;
mayRemove.push(domStyle);
}
if(newList) {
var newStyles = listToStyles(newList, options);
addStylesToDom(newStyles, options);
}
for (var i = 0; i < mayRemove.length; i++) {
var domStyle = mayRemove[i];
if(domStyle.refs === 0) {
for (var j = 0; j < domStyle.parts.length; j++) domStyle.parts[j]();
delete stylesInDom[domStyle.id];
}
}
};
};
function addStylesToDom (styles, options) {
for (var i = 0; i < styles.length; i++) {
var item = styles[i];
var domStyle = stylesInDom[item.id];
if(domStyle) {
domStyle.refs++;
for(var j = 0; j < domStyle.parts.length; j++) {
domStyle.parts[j](item.parts[j]);
}
for(; j < item.parts.length; j++) {
domStyle.parts.push(addStyle(item.parts[j], options));
}
} else {
var parts = [];
for(var j = 0; j < item.parts.length; j++) {
parts.push(addStyle(item.parts[j], options));
}
stylesInDom[item.id] = {id: item.id, refs: 1, parts: parts};
}
}
}
function listToStyles (list, options) {
var styles = [];
var newStyles = {};
for (var i = 0; i < list.length; i++) {
var item = list[i];
var id = options.base ? item[0] + options.base : item[0];
var css = item[1];
var media = item[2];
var sourceMap = item[3];
var part = {css: css, media: media, sourceMap: sourceMap};
if(!newStyles[id]) styles.push(newStyles[id] = {id: id, parts: [part]});
else newStyles[id].parts.push(part);
}
return styles;
}
function insertStyleElement (options, style) {
var target = getElement(options.insertInto)
if (!target) {
throw new Error("Couldn't find a style target. This probably means that the value for the 'insertInto' parameter is invalid.");
}
var lastStyleElementInsertedAtTop = stylesInsertedAtTop[stylesInsertedAtTop.length - 1];
if (options.insertAt === "top") {
if (!lastStyleElementInsertedAtTop) {
target.insertBefore(style, target.firstChild);
} else if (lastStyleElementInsertedAtTop.nextSibling) {
target.insertBefore(style, lastStyleElementInsertedAtTop.nextSibling);
} else {
target.appendChild(style);
}
stylesInsertedAtTop.push(style);
} else if (options.insertAt === "bottom") {
target.appendChild(style);
} else if (typeof options.insertAt === "object" && options.insertAt.before) {
var nextSibling = getElement(options.insertInto + " " + options.insertAt.before);
target.insertBefore(style, nextSibling);
} else {
throw new Error("[Style Loader]\n\n Invalid value for parameter 'insertAt' ('options.insertAt') found.\n Must be 'top', 'bottom', or Object.\n (https://github.com/webpack-contrib/style-loader#insertat)\n");
}
}
function removeStyleElement (style) {
if (style.parentNode === null) return false;
style.parentNode.removeChild(style);
var idx = stylesInsertedAtTop.indexOf(style);
if(idx >= 0) {
stylesInsertedAtTop.splice(idx, 1);
}
}
function createStyleElement (options) {
var style = document.createElement("style");
options.attrs.type = "text/css";
addAttrs(style, options.attrs);
insertStyleElement(options, style);
return style;
}
function createLinkElement (options) {
var link = document.createElement("link");
options.attrs.type = "text/css";
options.attrs.rel = "stylesheet";
addAttrs(link, options.attrs);
insertStyleElement(options, link);
return link;
}
function addAttrs (el, attrs) {
Object.keys(attrs).forEach(function (key) {
el.setAttribute(key, attrs[key]);
});
}
function addStyle (obj, options) {
var style, update, remove, result;
// If a transform function was defined, run it on the css
if (options.transform && obj.css) {
result = options.transform(obj.css);
if (result) {
// If transform returns a value, use that instead of the original css.
// This allows running runtime transformations on the css.
obj.css = result;
} else {
// If the transform function returns a falsy value, don't add this css.
// This allows conditional loading of css
return function() {
// noop
};
}
}
if (options.singleton) {
var styleIndex = singletonCounter++;
style = singleton || (singleton = createStyleElement(options));
update = applyToSingletonTag.bind(null, style, styleIndex, false);
remove = applyToSingletonTag.bind(null, style, styleIndex, true);
} else if (
obj.sourceMap &&
typeof URL === "function" &&
typeof URL.createObjectURL === "function" &&
typeof URL.revokeObjectURL === "function" &&
typeof Blob === "function" &&
typeof btoa === "function"
) {
style = createLinkElement(options);
update = updateLink.bind(null, style, options);
remove = function () {
removeStyleElement(style);
if(style.href) URL.revokeObjectURL(style.href);
};
} else {
style = createStyleElement(options);
update = applyToTag.bind(null, style);
remove = function () {
removeStyleElement(style);
};
}
update(obj);
return function updateStyle (newObj) {
if (newObj) {
if (
newObj.css === obj.css &&
newObj.media === obj.media &&
newObj.sourceMap === obj.sourceMap
) {
return;
}
update(obj = newObj);
} else {
remove();
}
};
}
var replaceText = (function () {
var textStore = [];
return function (index, replacement) {
textStore[index] = replacement;
return textStore.filter(Boolean).join('\n');
};
})();
function applyToSingletonTag (style, index, remove, obj) {
var css = remove ? "" : obj.css;
if (style.styleSheet) {
style.styleSheet.cssText = replaceText(index, css);
} else {
var cssNode = document.createTextNode(css);
var childNodes = style.childNodes;
if (childNodes[index]) style.removeChild(childNodes[index]);
if (childNodes.length) {
style.insertBefore(cssNode, childNodes[index]);
} else {
style.appendChild(cssNode);
}
}
}
function applyToTag (style, obj) {
var css = obj.css;
var media = obj.media;
if(media) {
style.setAttribute("media", media)
}
if(style.styleSheet) {
style.styleSheet.cssText = css;
} else {
while(style.firstChild) {
style.removeChild(style.firstChild);
}
style.appendChild(document.createTextNode(css));
}
}
function updateLink (link, options, obj) {
var css = obj.css;
var sourceMap = obj.sourceMap;
/*
If convertToAbsoluteUrls isn't defined, but sourcemaps are enabled
	and there is no publicPath defined, then let's turn convertToAbsoluteUrls
on by default. Otherwise default to the convertToAbsoluteUrls option
directly
*/
var autoFixUrls = options.convertToAbsoluteUrls === undefined && sourceMap;
if (options.convertToAbsoluteUrls || autoFixUrls) {
css = fixUrls(css);
}
if (sourceMap) {
// http://stackoverflow.com/a/26603875
css += "\n/*# sourceMappingURL=data:application/json;base64," + btoa(unescape(encodeURIComponent(JSON.stringify(sourceMap)))) + " */";
}
var blob = new Blob([css], { type: "text/css" });
var oldSrc = link.href;
link.href = URL.createObjectURL(blob);
if(oldSrc) URL.revokeObjectURL(oldSrc);
}
/***/ }),
/***/ 52:
/***/ (function(module, exports) {
/**
* When source maps are enabled, `style-loader` uses a link element with a data-uri to
* embed the css on the page. This breaks all relative urls because now they are relative to a
* bundle instead of the current page.
*
* One solution is to only use full urls, but that may be impossible.
*
* Instead, this function "fixes" the relative urls to be absolute according to the current page location.
*
* A rudimentary test suite is located at `test/fixUrls.js` and can be run via the `npm test` command.
*
*/
module.exports = function (css) {
// get current location
var location = typeof window !== "undefined" && window.location;
if (!location) {
throw new Error("fixUrls requires window.location");
}
// blank or null?
if (!css || typeof css !== "string") {
return css;
}
var baseUrl = location.protocol + "//" + location.host;
var currentDir = baseUrl + location.pathname.replace(/\/[^\/]*$/, "/");
// convert each url(...)
/*
This regular expression is just a way to recursively match brackets within
a string.
/url\s*\( = Match on the word "url" with any whitespace after it and then a parens
( = Start a capturing group
(?: = Start a non-capturing group
[^)(] = Match anything that isn't a parentheses
| = OR
\( = Match a start parentheses
(?: = Start another non-capturing groups
[^)(]+ = Match anything that isn't a parentheses
| = OR
\( = Match a start parentheses
[^)(]* = Match anything that isn't a parentheses
\) = Match a end parentheses
) = End Group
*\) = Match anything and then a close parens
) = Close non-capturing group
* = Match anything
) = Close capturing group
\) = Match a close parens
/gi = Get all matches, not the first. Be case insensitive.
*/
var fixedCss = css.replace(/url\s*\(((?:[^)(]|\((?:[^)(]+|\([^)(]*\))*\))*)\)/gi, function(fullMatch, origUrl) {
// strip quotes (if they exist)
var unquotedOrigUrl = origUrl
.trim()
.replace(/^"(.*)"$/, function(o, $1){ return $1; })
.replace(/^'(.*)'$/, function(o, $1){ return $1; });
// already a full url? no change
if (/^(#|data:|http:\/\/|https:\/\/|file:\/\/\/)/i.test(unquotedOrigUrl)) {
return fullMatch;
}
// convert the url to a full url
var newUrl;
if (unquotedOrigUrl.indexOf("//") === 0) {
//TODO: should we add protocol?
newUrl = unquotedOrigUrl;
} else if (unquotedOrigUrl.indexOf("/") === 0) {
// path should be relative to the base url
newUrl = baseUrl + unquotedOrigUrl; // already starts with '/'
} else {
// path should be relative to current directory
newUrl = currentDir + unquotedOrigUrl.replace(/^\.\//, ""); // Strip leading './'
}
// send back the fixed url(...)
return "url(" + JSON.stringify(newUrl) + ")";
});
// send back the fixed css
return fixedCss;
};
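// Illustrative sketch (added for clarity, not part of the loader): with the
// page located at https://example.com/app/page.html, the rewrite above keeps
// absolute and data: URLs intact and resolves relative ones against the
// current directory. Never invoked here.
function exampleFixUrlsUsage(fixUrls) {
    // yields '.icon { background: url("https://example.com/app/img/icon.png"); }'
    return fixUrls('.icon { background: url(./img/icon.png); }');
}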
/***/ }),
/***/ 610:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
var _utils = __webpack_require__(1);
var _reducers = __webpack_require__(611);
var _reducers2 = _interopRequireDefault(_reducers);
var _NavigationsApp = __webpack_require__(612);
var _NavigationsApp2 = _interopRequireDefault(_NavigationsApp);
var _actions = __webpack_require__(125);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
var store = (0, _utils.createStore)(_reducers2.default);
if (window.viewData) {
store.dispatch((0, _actions.initViewData)(window.viewData));
}
(0, _utils.render)(store, _NavigationsApp2.default, document.getElementById('settings-app'));
/***/ }),
/***/ 611:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };
exports.default = navigationReducer;
var _actions = __webpack_require__(125);
var initialState = {
query: null,
navigations: [],
navigationsById: {},
activeNavigationId: null,
isLoading: false,
totalNavigations: null,
activeQuery: null,
products: []
};
function navigationReducer() {
var state = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : initialState;
var action = arguments[1];
switch (action.type) {
case _actions.SELECT_NAVIGATION:
{
var defaultNavigation = {
is_enabled: true,
name: '',
description: ''
};
return _extends({}, state, {
activeNavigationId: action.id || null,
navigationToEdit: action.id ? Object.assign(defaultNavigation, state.navigationsById[action.id]) : null,
errors: null
});
}
case _actions.EDIT_NAVIGATION:
{
var target = action.event.target;
var field = target.name;
        var navigation = Object.assign({}, state.navigationToEdit); // copy so the previous state object is not mutated
        navigation[field] = target.type === 'checkbox' ? target.checked : target.value;
return _extends({}, state, { navigationToEdit: navigation, errors: null });
}
case _actions.NEW_NAVIGATION:
{
var navigationToEdit = {
is_enabled: true,
name: '',
description: ''
};
return _extends({}, state, { navigationToEdit: navigationToEdit, errors: null });
}
case _actions.CANCEL_EDIT:
{
return _extends({}, state, { navigationToEdit: null, errors: null });
}
case _actions.SET_QUERY:
return _extends({}, state, { query: action.query });
case _actions.SET_ERROR:
return _extends({}, state, { errors: action.errors });
case _actions.QUERY_NAVIGATIONS:
return _extends({}, state, {
isLoading: true,
totalNavigations: null,
navigationToEdit: null,
activeQuery: state.query });
case _actions.GET_NAVIGATIONS:
{
var navigationsById = Object.assign({}, state.navigationsById);
var navigations = action.data.map(function (navigation) {
navigationsById[navigation._id] = navigation;
return navigation._id;
});
return _extends({}, state, { navigations: navigations, navigationsById: navigationsById, isLoading: false, totalNavigations: navigations.length });
}
case _actions.GET_PRODUCTS:
{
return _extends({}, state, { products: action.data });
}
default:
return state;
}
}
/***/ }),
/***/ 612:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _react = __webpack_require__(0);
var _react2 = _interopRequireDefault(_react);
var _propTypes = __webpack_require__(2);
var _propTypes2 = _interopRequireDefault(_propTypes);
var _reactRedux = __webpack_require__(6);
var _actions = __webpack_require__(125);
var _Navigations = __webpack_require__(613);
var _Navigations2 = _interopRequireDefault(_Navigations);
var _ListBar = __webpack_require__(75);
var _ListBar2 = _interopRequireDefault(_ListBar);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var NavigationsApp = function (_React$Component) {
_inherits(NavigationsApp, _React$Component);
function NavigationsApp(props, context) {
_classCallCheck(this, NavigationsApp);
return _possibleConstructorReturn(this, (NavigationsApp.__proto__ || Object.getPrototypeOf(NavigationsApp)).call(this, props, context));
}
_createClass(NavigationsApp, [{
key: 'render',
value: function render() {
return [_react2.default.createElement(_ListBar2.default, {
key: 'NavigationBar',
onNewItem: this.props.newNavigation,
setQuery: this.props.setQuery,
fetch: this.props.fetchNavigations,
buttonName: 'Navigation'
}), _react2.default.createElement(_Navigations2.default, { key: 'Navigations'
})];
}
}]);
return NavigationsApp;
}(_react2.default.Component);
NavigationsApp.propTypes = {
newNavigation: _propTypes2.default.func,
fetchNavigations: _propTypes2.default.func,
setQuery: _propTypes2.default.func
};
var mapDispatchToProps = {
newNavigation: _actions.newNavigation,
fetchNavigations: _actions.fetchNavigations,
setQuery: _actions.setQuery
};
exports.default = (0, _reactRedux.connect)(null, mapDispatchToProps)(NavigationsApp);
/***/ }),
/***/ 613:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _react = __webpack_require__(0);
var _react2 = _interopRequireDefault(_react);
var _propTypes = __webpack_require__(2);
var _propTypes2 = _interopRequireDefault(_propTypes);
var _reactRedux = __webpack_require__(6);
var _EditNavigation = __webpack_require__(614);
var _EditNavigation2 = _interopRequireDefault(_EditNavigation);
var _NavigationList = __webpack_require__(615);
var _NavigationList2 = _interopRequireDefault(_NavigationList);
var _SearchResultsInfo = __webpack_require__(62);
var _SearchResultsInfo2 = _interopRequireDefault(_SearchResultsInfo);
var _actions = __webpack_require__(125);
var _utils = __webpack_require__(1);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var Navigations = function (_React$Component) {
_inherits(Navigations, _React$Component);
function Navigations(props, context) {
_classCallCheck(this, Navigations);
var _this = _possibleConstructorReturn(this, (Navigations.__proto__ || Object.getPrototypeOf(Navigations)).call(this, props, context));
_this.isFormValid = _this.isFormValid.bind(_this);
_this.save = _this.save.bind(_this);
_this.deleteNavigation = _this.deleteNavigation.bind(_this);
return _this;
}
_createClass(Navigations, [{
key: 'isFormValid',
value: function isFormValid() {
var valid = true;
var errors = {};
if (!this.props.navigationToEdit.name) {
errors.name = ['Please provide navigation name'];
valid = false;
}
this.props.dispatch((0, _actions.setError)(errors));
return valid;
}
}, {
key: 'save',
value: function save(event) {
event.preventDefault();
if (!this.isFormValid()) {
return;
}
this.props.saveNavigation();
}
}, {
key: 'deleteNavigation',
value: function deleteNavigation(event) {
event.preventDefault();
if (confirm((0, _utils.gettext)('Would you like to delete navigation: {{name}}', { name: this.props.navigationToEdit.name }))) {
this.props.deleteNavigation();
}
}
}, {
key: 'render',
value: function render() {
var progressStyle = { width: '25%' };
return _react2.default.createElement(
'div',
{ className: 'flex-row' },
this.props.isLoading ? _react2.default.createElement(
'div',
{ className: 'col d' },
_react2.default.createElement(
'div',
{ className: 'progress' },
_react2.default.createElement('div', { className: 'progress-bar', style: progressStyle })
)
) : _react2.default.createElement(
'div',
{ className: 'flex-col flex-column' },
this.props.activeQuery && _react2.default.createElement(_SearchResultsInfo2.default, {
totalItems: this.props.totalNavigations,
query: this.props.activeQuery }),
_react2.default.createElement(_NavigationList2.default, {
navigations: this.props.navigations,
onClick: this.props.selectNavigation,
activeNavigationId: this.props.activeNavigationId })
),
this.props.navigationToEdit && _react2.default.createElement(_EditNavigation2.default, {
navigation: this.props.navigationToEdit,
onChange: this.props.editNavigation,
errors: this.props.errors,
onSave: this.save,
onClose: this.props.cancelEdit,
onDelete: this.deleteNavigation,
products: this.props.products,
saveProducts: this.props.saveProducts,
fetchProducts: this.props.fetchProducts
})
);
}
}]);
return Navigations;
}(_react2.default.Component);
Navigations.propTypes = {
navigations: _propTypes2.default.arrayOf(_propTypes2.default.object),
navigationToEdit: _propTypes2.default.object,
activeNavigationId: _propTypes2.default.string,
selectNavigation: _propTypes2.default.func,
editNavigation: _propTypes2.default.func,
saveNavigation: _propTypes2.default.func,
deleteNavigation: _propTypes2.default.func,
newNavigation: _propTypes2.default.func,
cancelEdit: _propTypes2.default.func,
isLoading: _propTypes2.default.bool,
activeQuery: _propTypes2.default.string,
totalNavigations: _propTypes2.default.number,
errors: _propTypes2.default.object,
dispatch: _propTypes2.default.func,
products: _propTypes2.default.arrayOf(_propTypes2.default.object),
saveProducts: _propTypes2.default.func.isRequired,
fetchProducts: _propTypes2.default.func.isRequired
};
var mapStateToProps = function mapStateToProps(state) {
return {
navigations: state.navigations.map(function (id) {
return state.navigationsById[id];
}),
navigationToEdit: state.navigationToEdit,
activeNavigationId: state.activeNavigationId,
isLoading: state.isLoading,
activeQuery: state.activeQuery,
totalNavigations: state.totalNavigations,
errors: state.errors,
products: state.products
};
};
var mapDispatchToProps = function mapDispatchToProps(dispatch) {
return {
selectNavigation: function selectNavigation(_id) {
return dispatch((0, _actions.selectNavigation)(_id));
},
editNavigation: function editNavigation(event) {
return dispatch((0, _actions.editNavigation)(event));
},
saveNavigation: function saveNavigation(type) {
return dispatch((0, _actions.postNavigation)(type));
},
deleteNavigation: function deleteNavigation(type) {
return dispatch((0, _actions.deleteNavigation)(type));
},
newNavigation: function newNavigation() {
return dispatch((0, _actions.newNavigation)());
},
cancelEdit: function cancelEdit(event) {
return dispatch((0, _actions.cancelEdit)(event));
},
saveProducts: function saveProducts(products) {
return dispatch((0, _actions.saveProducts)(products));
},
fetchProducts: function fetchProducts() {
return dispatch((0, _actions.fetchProducts)());
},
dispatch: dispatch
};
};
exports.default = (0, _reactRedux.connect)(mapStateToProps, mapDispatchToProps)(Navigations);
/***/ }),
/***/ 614:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _react = __webpack_require__(0);
var _react2 = _interopRequireDefault(_react);
var _propTypes = __webpack_require__(2);
var _propTypes2 = _interopRequireDefault(_propTypes);
var _TextInput = __webpack_require__(26);
var _TextInput2 = _interopRequireDefault(_TextInput);
var _CheckboxInput = __webpack_require__(27);
var _CheckboxInput2 = _interopRequireDefault(_CheckboxInput);
var _EditPanel = __webpack_require__(100);
var _EditPanel2 = _interopRequireDefault(_EditPanel);
var _utils = __webpack_require__(1);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var EditNavigation = function (_React$Component) {
_inherits(EditNavigation, _React$Component);
function EditNavigation(props) {
_classCallCheck(this, EditNavigation);
var _this = _possibleConstructorReturn(this, (EditNavigation.__proto__ || Object.getPrototypeOf(EditNavigation)).call(this, props));
_this.handleTabClick = _this.handleTabClick.bind(_this);
_this.state = { activeTab: 'navigation-details' };
_this.tabs = [{ label: (0, _utils.gettext)('Navigation'), name: 'navigation-details' }, { label: (0, _utils.gettext)('Products'), name: 'products' }];
return _this;
}
_createClass(EditNavigation, [{
key: 'handleTabClick',
value: function handleTabClick(event) {
this.setState({ activeTab: event.target.name });
if (event.target.name === 'products' && this.props.navigation._id) {
this.props.fetchProducts();
}
}
}, {
key: 'getNavigationProducts',
value: function getNavigationProducts() {
var _this2 = this;
var products = this.props.products.filter(function (product) {
return product.navigations && product.navigations.includes(_this2.props.navigation._id);
}).map(function (p) {
return p._id;
});
return { _id: this.props.navigation._id, products: products };
}
}, {
key: 'render',
value: function render() {
var _this3 = this;
return _react2.default.createElement(
'div',
{ className: 'list-item__preview' },
_react2.default.createElement(
'div',
{ className: 'list-item__preview-header' },
_react2.default.createElement(
'h3',
null,
this.props.navigation.name
),
_react2.default.createElement(
'button',
{
id: 'hide-sidebar',
type: 'button',
className: 'icon-button',
'data-dismiss': 'modal',
'aria-label': 'Close',
onClick: this.props.onClose },
_react2.default.createElement('i', { className: 'icon--close-thin icon--gray', 'aria-hidden': 'true' })
)
),
_react2.default.createElement(
'ul',
{ className: 'nav nav-tabs' },
this.tabs.map(function (tab) {
return _react2.default.createElement(
'li',
{ key: tab.name, className: 'nav-item' },
_react2.default.createElement(
'a',
{
name: tab.name,
className: 'nav-link ' + (_this3.state.activeTab === tab.name && 'active'),
href: '#',
onClick: _this3.handleTabClick },
tab.label
)
);
})
),
_react2.default.createElement(
'div',
{ className: 'tab-content' },
this.state.activeTab === 'navigation-details' && _react2.default.createElement(
'div',
{ className: 'tab-pane active', id: 'navigation-details' },
_react2.default.createElement(
'form',
null,
_react2.default.createElement(
'div',
{ className: 'list-item__preview-form' },
_react2.default.createElement(_TextInput2.default, {
name: 'name',
label: (0, _utils.gettext)('Name'),
value: this.props.navigation.name,
onChange: this.props.onChange,
error: this.props.errors ? this.props.errors.name : null }),
_react2.default.createElement(_TextInput2.default, {
name: 'description',
label: (0, _utils.gettext)('Description'),
value: this.props.navigation.description,
onChange: this.props.onChange,
error: this.props.errors ? this.props.errors.description : null }),
_react2.default.createElement(_CheckboxInput2.default, {
name: 'is_enabled',
label: (0, _utils.gettext)('Enabled'),
value: this.props.navigation.is_enabled,
onChange: this.props.onChange })
),
_react2.default.createElement(
'div',
{ className: 'list-item__preview-footer' },
_react2.default.createElement('input', {
type: 'button',
className: 'btn btn-outline-primary',
value: (0, _utils.gettext)('Save'),
onClick: this.props.onSave }),
_react2.default.createElement('input', {
type: 'button',
className: 'btn btn-outline-secondary',
value: (0, _utils.gettext)('Delete'),
onClick: this.props.onDelete })
)
)
),
this.state.activeTab === 'products' && _react2.default.createElement(_EditPanel2.default, {
parent: this.getNavigationProducts(),
items: this.props.products,
field: 'products',
onSave: this.props.saveProducts
})
)
);
}
}]);
return EditNavigation;
}(_react2.default.Component);
EditNavigation.propTypes = {
navigation: _propTypes2.default.object.isRequired,
onChange: _propTypes2.default.func,
errors: _propTypes2.default.object,
products: _propTypes2.default.arrayOf(_propTypes2.default.object),
onSave: _propTypes2.default.func.isRequired,
onClose: _propTypes2.default.func.isRequired,
onDelete: _propTypes2.default.func.isRequired,
saveProducts: _propTypes2.default.func.isRequired,
fetchProducts: _propTypes2.default.func.isRequired
};
exports.default = EditNavigation;
/***/ }),
/***/ 615:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
var _react = __webpack_require__(0);
var _react2 = _interopRequireDefault(_react);
var _propTypes = __webpack_require__(2);
var _propTypes2 = _interopRequireDefault(_propTypes);
var _NavigationListItem = __webpack_require__(616);
var _NavigationListItem2 = _interopRequireDefault(_NavigationListItem);
var _utils = __webpack_require__(1);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function NavigationList(_ref) {
var navigations = _ref.navigations,
onClick = _ref.onClick,
activeNavigationId = _ref.activeNavigationId;
var list = navigations.map(function (navigation) {
return _react2.default.createElement(_NavigationListItem2.default, {
key: navigation._id,
navigation: navigation,
onClick: onClick,
isActive: activeNavigationId === navigation._id });
});
return _react2.default.createElement(
'section',
{ className: 'content-main' },
_react2.default.createElement(
'div',
{ className: 'list-items-container' },
_react2.default.createElement(
'table',
{ className: 'table table-hover' },
_react2.default.createElement(
'thead',
null,
_react2.default.createElement(
'tr',
null,
_react2.default.createElement(
'th',
null,
(0, _utils.gettext)('Name')
),
_react2.default.createElement(
'th',
null,
(0, _utils.gettext)('Description')
),
_react2.default.createElement(
'th',
null,
(0, _utils.gettext)('Status')
),
_react2.default.createElement(
'th',
null,
(0, _utils.gettext)('Created On')
)
)
),
_react2.default.createElement(
'tbody',
null,
list
)
)
)
);
}
NavigationList.propTypes = {
navigations: _propTypes2.default.array.isRequired,
onClick: _propTypes2.default.func.isRequired,
activeNavigationId: _propTypes2.default.string
};
exports.default = NavigationList;
/***/ }),
/***/ 616:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
var _react = __webpack_require__(0);
var _react2 = _interopRequireDefault(_react);
var _propTypes = __webpack_require__(2);
var _propTypes2 = _interopRequireDefault(_propTypes);
var _utils = __webpack_require__(1);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function NavigationListItem(_ref) {
var navigation = _ref.navigation,
isActive = _ref.isActive,
_onClick = _ref.onClick;
return _react2.default.createElement(
'tr',
{ key: navigation._id,
className: isActive ? 'table--selected' : null,
onClick: function onClick() {
return _onClick(navigation._id);
} },
_react2.default.createElement(
'td',
{ className: 'name' },
navigation.name
),
_react2.default.createElement(
'td',
null,
navigation.description
),
_react2.default.createElement(
'td',
null,
navigation.is_enabled ? (0, _utils.gettext)('Enabled') : (0, _utils.gettext)('Disabled')
),
_react2.default.createElement(
'td',
null,
(0, _utils.shortDate)(navigation._created)
)
);
}
NavigationListItem.propTypes = {
navigation: _propTypes2.default.object,
isActive: _propTypes2.default.bool,
onClick: _propTypes2.default.func
};
exports.default = NavigationListItem;
/***/ }),
/***/ 62:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
/* WEBPACK VAR INJECTION */(function($) {
Object.defineProperty(exports, "__esModule", {
value: true
});
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _react = __webpack_require__(0);
var _react2 = _interopRequireDefault(_react);
var _propTypes = __webpack_require__(2);
var _propTypes2 = _interopRequireDefault(_propTypes);
var _classnames = __webpack_require__(14);
var _classnames2 = _interopRequireDefault(_classnames);
__webpack_require__(63);
var _lodash = __webpack_require__(7);
var _utils = __webpack_require__(1);
var _utils2 = __webpack_require__(1);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var SearchResultsInfo = function (_React$Component) {
_inherits(SearchResultsInfo, _React$Component);
function SearchResultsInfo() {
_classCallCheck(this, SearchResultsInfo);
return _possibleConstructorReturn(this, (SearchResultsInfo.__proto__ || Object.getPrototypeOf(SearchResultsInfo)).apply(this, arguments));
}
_createClass(SearchResultsInfo, [{
key: 'componentDidMount',
value: function componentDidMount() {
if (!(0, _utils2.isTouchDevice)()) {
this.elem && $(this.elem).tooltip();
}
}
}, {
key: 'componentWillUnmount',
value: function componentWillUnmount() {
this.elem && $(this.elem).tooltip('dispose'); // make sure it's gone
}
}, {
key: 'componentWillUpdate',
value: function componentWillUpdate() {
this.componentWillUnmount();
}
}, {
key: 'componentDidUpdate',
value: function componentDidUpdate() {
this.componentDidMount();
}
}, {
key: 'render',
value: function render() {
var _this2 = this;
var isFollowing = this.props.user && this.props.activeTopic;
var displayFollowTopic = this.props.user && !this.props.bookmarks && !(0, _lodash.isEmpty)(this.props.searchCriteria);
var displayTotalItems = this.props.bookmarks || !(0, _lodash.isEmpty)(this.props.searchCriteria) || this.props.activeTopic || this.props.resultsFiltered;
var displayHeader = !(0, _lodash.isEmpty)(this.props.newItems) || displayTotalItems || displayFollowTopic || this.props.query;
return displayHeader ? _react2.default.createElement(
'div',
{ className: (0, _classnames2.default)('wire-column__main-header d-flex mt-0 px-3 align-items-center flex-wrap flex-sm-nowrap', this.props.scrollClass) },
_react2.default.createElement(
'div',
{ className: 'navbar-text search-results-info' },
displayTotalItems && _react2.default.createElement(
'span',
{ className: 'search-results-info__num' },
this.props.totalItems
),
this.props.query && _react2.default.createElement(
'span',
{ className: 'search-results-info__text' },
(0, _utils.gettext)('search results for:'),
_react2.default.createElement('br', null),
_react2.default.createElement(
'b',
null,
this.props.query
)
)
),
displayFollowTopic && _react2.default.createElement(
'button',
{
disabled: isFollowing,
className: 'btn btn-outline-primary btn-sm d-none d-sm-block',
onClick: function onClick() {
return _this2.props.followTopic(_this2.props.searchCriteria);
}
},
(0, _utils.gettext)('Save as topic')
),
displayFollowTopic && _react2.default.createElement(
'button',
{
disabled: isFollowing,
className: 'btn btn-outline-primary btn-sm d-block d-sm-none',
onClick: function onClick() {
return _this2.props.followTopic(_this2.props.searchCriteria);
}
},
(0, _utils.gettext)('S')
),
_react2.default.createElement(
'div',
{ className: 'd-flex align-items-center ml-auto' },
!(0, _lodash.isEmpty)(this.props.newItems) && _react2.default.createElement(
'button',
{
type: 'button',
ref: function ref(elem) {
return _this2.elem = elem;
},
title: (0, _utils.gettext)('New stories available to load'),
className: 'button__reset-styles d-flex align-items-center ml-3',
onClick: this.props.refresh },
_react2.default.createElement('i', { className: 'icon--refresh icon--pink' }),
_react2.default.createElement(
'span',
{ className: 'badge badge-pill badge-info badge-secondary ml-2' },
this.props.newItems.length
)
)
)
) : null;
}
}]);
return SearchResultsInfo;
}(_react2.default.Component);
SearchResultsInfo.propTypes = {
user: _propTypes2.default.string,
query: _propTypes2.default.string,
totalItems: _propTypes2.default.number,
followTopic: _propTypes2.default.func,
bookmarks: _propTypes2.default.bool,
newItems: _propTypes2.default.array,
refresh: _propTypes2.default.func,
searchCriteria: _propTypes2.default.object,
activeTopic: _propTypes2.default.object,
toggleNews: _propTypes2.default.func,
activeNavigation: _propTypes2.default.string,
newsOnly: _propTypes2.default.bool,
scrollClass: _propTypes2.default.string,
resultsFiltered: _propTypes2.default.bool
};
exports.default = SearchResultsInfo;
/* WEBPACK VAR INJECTION */}.call(exports, __webpack_require__(23)))
/***/ }),
/***/ 63:
/***/ (function(module, exports, __webpack_require__) {
// style-loader: Adds some css to the DOM by adding a <style> tag
// load the styles
var content = __webpack_require__(64);
if(typeof content === 'string') content = [[module.i, content, '']];
// Prepare cssTransformation
var transform;
var options = {"hmr":true}
options.transform = transform
// add the styles to the DOM
var update = __webpack_require__(51)(content, options);
if(content.locals) module.exports = content.locals;
// Hot Module Replacement
if(false) {
// When the styles change, update the <style> tags
if(!content.locals) {
module.hot.accept("!!../css-loader/index.js!./style.css", function() {
var newContent = require("!!../css-loader/index.js!./style.css");
if(typeof newContent === 'string') newContent = [[module.id, newContent, '']];
update(newContent);
});
}
// When the module is disposed, remove the <style> tags
module.hot.dispose(function() { update(); });
}
/***/ }),
/***/ 64:
/***/ (function(module, exports, __webpack_require__) {
exports = module.exports = __webpack_require__(50)(false);
// imports
// module
exports.push([module.i, ".react-toggle {\n touch-action: pan-x;\n\n display: inline-block;\n position: relative;\n cursor: pointer;\n background-color: transparent;\n border: 0;\n padding: 0;\n\n -webkit-touch-callout: none;\n -webkit-user-select: none;\n -khtml-user-select: none;\n -moz-user-select: none;\n -ms-user-select: none;\n user-select: none;\n\n -webkit-tap-highlight-color: rgba(0,0,0,0);\n -webkit-tap-highlight-color: transparent;\n}\n\n.react-toggle-screenreader-only {\n border: 0;\n clip: rect(0 0 0 0);\n height: 1px;\n margin: -1px;\n overflow: hidden;\n padding: 0;\n position: absolute;\n width: 1px;\n}\n\n.react-toggle--disabled {\n cursor: not-allowed;\n opacity: 0.5;\n -webkit-transition: opacity 0.25s;\n transition: opacity 0.25s;\n}\n\n.react-toggle-track {\n width: 50px;\n height: 24px;\n padding: 0;\n border-radius: 30px;\n background-color: #4D4D4D;\n -webkit-transition: all 0.2s ease;\n -moz-transition: all 0.2s ease;\n transition: all 0.2s ease;\n}\n\n.react-toggle:hover:not(.react-toggle--disabled) .react-toggle-track {\n background-color: #000000;\n}\n\n.react-toggle--checked .react-toggle-track {\n background-color: #19AB27;\n}\n\n.react-toggle--checked:hover:not(.react-toggle--disabled) .react-toggle-track {\n background-color: #128D15;\n}\n\n.react-toggle-track-check {\n position: absolute;\n width: 14px;\n height: 10px;\n top: 0px;\n bottom: 0px;\n margin-top: auto;\n margin-bottom: auto;\n line-height: 0;\n left: 8px;\n opacity: 0;\n -webkit-transition: opacity 0.25s ease;\n -moz-transition: opacity 0.25s ease;\n transition: opacity 0.25s ease;\n}\n\n.react-toggle--checked .react-toggle-track-check {\n opacity: 1;\n -webkit-transition: opacity 0.25s ease;\n -moz-transition: opacity 0.25s ease;\n transition: opacity 0.25s ease;\n}\n\n.react-toggle-track-x {\n position: absolute;\n width: 10px;\n height: 10px;\n top: 0px;\n bottom: 0px;\n margin-top: auto;\n margin-bottom: auto;\n line-height: 0;\n right: 10px;\n opacity: 1;\n -webkit-transition: opacity 0.25s ease;\n -moz-transition: opacity 0.25s ease;\n transition: opacity 0.25s ease;\n}\n\n.react-toggle--checked .react-toggle-track-x {\n opacity: 0;\n}\n\n.react-toggle-thumb {\n transition: all 0.5s cubic-bezier(0.23, 1, 0.32, 1) 0ms;\n position: absolute;\n top: 1px;\n left: 1px;\n width: 22px;\n height: 22px;\n border: 1px solid #4D4D4D;\n border-radius: 50%;\n background-color: #FAFAFA;\n\n -webkit-box-sizing: border-box;\n -moz-box-sizing: border-box;\n box-sizing: border-box;\n\n -webkit-transition: all 0.25s ease;\n -moz-transition: all 0.25s ease;\n transition: all 0.25s ease;\n}\n\n.react-toggle--checked .react-toggle-thumb {\n left: 27px;\n border-color: #19AB27;\n}\n\n.react-toggle--focus .react-toggle-thumb {\n -webkit-box-shadow: 0px 0px 3px 2px #0099E0;\n -moz-box-shadow: 0px 0px 3px 2px #0099E0;\n box-shadow: 0px 0px 2px 3px #0099E0;\n}\n\n.react-toggle:active:not(.react-toggle--disabled) .react-toggle-thumb {\n -webkit-box-shadow: 0px 0px 5px 5px #0099E0;\n -moz-box-shadow: 0px 0px 5px 5px #0099E0;\n box-shadow: 0px 0px 5px 5px #0099E0;\n}\n", ""]);
// exports
/***/ }),
/***/ 65:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _react = __webpack_require__(0);
var _react2 = _interopRequireDefault(_react);
var _propTypes = __webpack_require__(2);
var _propTypes2 = _interopRequireDefault(_propTypes);
var _classnames = __webpack_require__(14);
var _classnames2 = _interopRequireDefault(_classnames);
var _reactRedux = __webpack_require__(6);
var _utils = __webpack_require__(1);
var _actions = __webpack_require__(9);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var SearchBar = function (_React$Component) {
_inherits(SearchBar, _React$Component);
function SearchBar(props) {
_classCallCheck(this, SearchBar);
var _this = _possibleConstructorReturn(this, (SearchBar.__proto__ || Object.getPrototypeOf(SearchBar)).call(this, props));
_this.onChange = _this.onChange.bind(_this);
_this.onSubmit = _this.onSubmit.bind(_this);
_this.onClear = _this.onClear.bind(_this);
_this.state = { query: props.query || '' };
return _this;
}
_createClass(SearchBar, [{
key: 'onChange',
value: function onChange(event) {
this.setState({ query: event.target.value });
}
}, {
key: 'onSubmit',
value: function onSubmit(event) {
event.preventDefault();
this.props.setQuery(this.state.query);
this.props.fetchItems();
}
}, {
key: 'onClear',
value: function onClear() {
this.props.setQuery('');
this.props.fetchItems();
this.setState({ query: '' });
}
}, {
key: 'componentWillReceiveProps',
value: function componentWillReceiveProps(nextProps) {
this.setState({ query: nextProps.query });
}
}, {
key: 'render',
value: function render() {
return _react2.default.createElement(
'div',
{ className: 'search form-inline' },
_react2.default.createElement(
'span',
{ className: 'search__icon' },
_react2.default.createElement('i', { className: 'icon--search icon--gray-light' })
),
_react2.default.createElement(
'div',
{ className: (0, _classnames2.default)('search__form input-group', {
'searchForm--active': !!this.state.query
}) },
_react2.default.createElement(
'form',
{ className: 'form-inline', onSubmit: this.onSubmit },
_react2.default.createElement('input', { type: 'text',
name: 'q',
className: 'search__input form-control',
placeholder: 'Search for...',
'aria-label': 'Search for...',
value: this.state.query || '',
onChange: this.onChange
}),
_react2.default.createElement(
'div',
{ className: 'search__form__buttons' },
_react2.default.createElement(
'span',
{ className: 'search__clear', onClick: this.onClear },
_react2.default.createElement('img', { src: '/static/search_clear.png', width: '16', height: '16' })
),
_react2.default.createElement(
'button',
{ className: 'btn btn-outline-secondary', type: 'submit' },
(0, _utils.gettext)('Search')
)
)
)
)
);
}
}]);
return SearchBar;
}(_react2.default.Component);
SearchBar.propTypes = {
query: _propTypes2.default.string,
setQuery: _propTypes2.default.func,
fetchItems: _propTypes2.default.func
};
var mapStateToProps = function mapStateToProps(state) {
return {
query: state.activeQuery
};
};
var mapDispatchToProps = function mapDispatchToProps(dispatch) {
return {
setQuery: function setQuery(query) {
return dispatch((0, _actions.setQuery)(query));
}
};
};
exports.default = (0, _reactRedux.connect)(mapStateToProps, mapDispatchToProps)(SearchBar);
/***/ }),
/***/ 75:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _react = __webpack_require__(0);
var _react2 = _interopRequireDefault(_react);
var _propTypes = __webpack_require__(2);
var _propTypes2 = _interopRequireDefault(_propTypes);
var _utils = __webpack_require__(1);
var _SearchBar = __webpack_require__(65);
var _SearchBar2 = _interopRequireDefault(_SearchBar);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var ListBar = function (_React$Component) {
_inherits(ListBar, _React$Component);
function ListBar() {
_classCallCheck(this, ListBar);
return _possibleConstructorReturn(this, (ListBar.__proto__ || Object.getPrototypeOf(ListBar)).apply(this, arguments));
}
_createClass(ListBar, [{
key: 'render',
value: function render() {
var _this2 = this;
return _react2.default.createElement(
'section',
{ className: 'content-header' },
_react2.default.createElement(
'nav',
{ className: 'content-bar navbar content-bar--side-padding' },
_react2.default.createElement(_SearchBar2.default, { setQuery: this.props.setQuery, fetchItems: function fetchItems() {
return _this2.props.fetch();
} }),
_react2.default.createElement(
'div',
{ className: 'content-bar__right' },
_react2.default.createElement(
'button',
{
className: 'btn btn-outline-secondary btn-responsive',
onClick: function onClick() {
return _this2.props.onNewItem();
} },
(0, _utils.gettext)('New {{ buttonName }}', { buttonName: this.props.buttonName })
)
)
)
);
}
}]);
return ListBar;
}(_react2.default.Component);
ListBar.propTypes = {
setQuery: _propTypes2.default.func,
fetch: _propTypes2.default.func,
buttonName: _propTypes2.default.string,
onNewItem: _propTypes2.default.func
};
exports.default = ListBar;
/***/ }),
/***/ 9:
/***/ (function(module, exports, __webpack_require__) {
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.SET_VIEW = exports.RESET_FILTER = exports.SET_CREATED_FILTER = exports.RECIEVE_NEXT_ITEMS = exports.START_LOADING = exports.TOGGLE_FILTER = exports.TOGGLE_NAVIGATION = exports.SET_NEW_ITEMS = exports.SET_TOPICS = exports.REMOVE_NEW_ITEMS = exports.SET_NEW_ITEMS_BY_TOPIC = exports.REMOVE_BOOKMARK = exports.BOOKMARK_ITEMS = exports.PRINT_ITEMS = exports.COPY_ITEMS = exports.DOWNLOAD_ITEMS = exports.SHARE_ITEMS = exports.SELECT_NONE = exports.SELECT_ALL = exports.TOGGLE_SELECTED = exports.TOGGLE_NEWS = exports.ADD_TOPIC = exports.INIT_DATA = exports.RECIEVE_ITEM = exports.RECIEVE_ITEMS = exports.QUERY_ITEMS = exports.SET_QUERY = exports.OPEN_ITEM = exports.PREVIEW_ITEM = exports.SET_ACTIVE = exports.SET_ITEMS = exports.SET_STATE = undefined;
exports.setState = setState;
exports.setItems = setItems;
exports.setActive = setActive;
exports.preview = preview;
exports.previewAndCopy = previewAndCopy;
exports.previewItem = previewItem;
exports.openItemDetails = openItemDetails;
exports.openItem = openItem;
exports.setQuery = setQuery;
exports.queryItems = queryItems;
exports.recieveItems = recieveItems;
exports.recieveItem = recieveItem;
exports.initData = initData;
exports.addTopic = addTopic;
exports.toggleNews = toggleNews;
exports.copyPreviewContents = copyPreviewContents;
exports.printItem = printItem;
exports.fetchItems = fetchItems;
exports.fetchItem = fetchItem;
exports.followTopic = followTopic;
exports.submitFollowTopic = submitFollowTopic;
exports.shareItems = shareItems;
exports.submitShareItem = submitShareItem;
exports.toggleSelected = toggleSelected;
exports.selectAll = selectAll;
exports.selectNone = selectNone;
exports.setShareItems = setShareItems;
exports.setDownloadItems = setDownloadItems;
exports.setCopyItem = setCopyItem;
exports.setPrintItem = setPrintItem;
exports.setBookmarkItems = setBookmarkItems;
exports.removeBookmarkItems = removeBookmarkItems;
exports.bookmarkItems = bookmarkItems;
exports.removeBookmarks = removeBookmarks;
exports.fetchVersions = fetchVersions;
exports.downloadItems = downloadItems;
exports.submitDownloadItems = submitDownloadItems;
exports.setNewItemsByTopic = setNewItemsByTopic;
exports.removeNewItems = removeNewItems;
exports.pushNotification = pushNotification;
exports.setNewItems = setNewItems;
exports.fetchNewItems = fetchNewItems;
exports.fetchNext = fetchNext;
exports.toggleNavigation = toggleNavigation;
exports.toggleFilter = toggleFilter;
exports.startLoading = startLoading;
exports.recieveNextItems = recieveNextItems;
exports.fetchMoreItems = fetchMoreItems;
exports.initParams = initParams;
exports.setCreatedFilter = setCreatedFilter;
exports.resetFilter = resetFilter;
exports.setTopicQuery = setTopicQuery;
exports.setView = setView;
exports.refresh = refresh;
var _lodash = __webpack_require__(7);
var _server = __webpack_require__(15);
var _server2 = _interopRequireDefault(_server);
var _analytics = __webpack_require__(29);
var _analytics2 = _interopRequireDefault(_analytics);
var _utils = __webpack_require__(1);
var _utils2 = __webpack_require__(11);
var _actions = __webpack_require__(16);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
var SET_STATE = exports.SET_STATE = 'SET_STATE';
function setState(state) {
return { type: SET_STATE, state: state };
}
var SET_ITEMS = exports.SET_ITEMS = 'SET_ITEMS';
function setItems(items) {
return { type: SET_ITEMS, items: items };
}
var SET_ACTIVE = exports.SET_ACTIVE = 'SET_ACTIVE';
function setActive(item) {
return { type: SET_ACTIVE, item: item };
}
var PREVIEW_ITEM = exports.PREVIEW_ITEM = 'PREVIEW_ITEM';
function preview(item) {
return { type: PREVIEW_ITEM, item: item };
}
function previewAndCopy(item) {
return function (dispatch) {
dispatch(previewItem(item));
dispatch(copyPreviewContents(item));
};
}
function previewItem(item) {
return function (dispatch, getState) {
(0, _utils2.markItemAsRead)(item, getState());
dispatch(preview(item));
item && _analytics2.default.itemEvent('preview', item);
};
}
var OPEN_ITEM = exports.OPEN_ITEM = 'OPEN_ITEM';
function openItemDetails(item) {
return { type: OPEN_ITEM, item: item };
}
function openItem(item) {
return function (dispatch, getState) {
(0, _utils2.markItemAsRead)(item, getState());
dispatch(openItemDetails(item));
(0, _utils.updateRouteParams)({
item: item ? item._id : null
}, getState());
item && _analytics2.default.itemEvent('open', item);
_analytics2.default.itemView(item);
};
}
var SET_QUERY = exports.SET_QUERY = 'SET_QUERY';
function setQuery(query) {
query && _analytics2.default.event('search', query);
return { type: SET_QUERY, query: query };
}
var QUERY_ITEMS = exports.QUERY_ITEMS = 'QUERY_ITEMS';
function queryItems() {
return { type: QUERY_ITEMS };
}
var RECIEVE_ITEMS = exports.RECIEVE_ITEMS = 'RECIEVE_ITEMS';
function recieveItems(data) {
return { type: RECIEVE_ITEMS, data: data };
}
var RECIEVE_ITEM = exports.RECIEVE_ITEM = 'RECIEVE_ITEM';
function recieveItem(data) {
return { type: RECIEVE_ITEM, data: data };
}
var INIT_DATA = exports.INIT_DATA = 'INIT_DATA';
function initData(wireData, readData, newsOnly) {
return { type: INIT_DATA, wireData: wireData, readData: readData, newsOnly: newsOnly };
}
var ADD_TOPIC = exports.ADD_TOPIC = 'ADD_TOPIC';
function addTopic(topic) {
return { type: ADD_TOPIC, topic: topic };
}
var TOGGLE_NEWS = exports.TOGGLE_NEWS = 'TOGGLE_NEWS';
function toggleNews() {
(0, _utils2.toggleNewsOnlyParam)();
return { type: TOGGLE_NEWS };
}
/**
* Copy contents of item preview.
*
* This is an initial version, should be updated with preview markup changes.
*/
function copyPreviewContents(item) {
return function (dispatch, getState) {
var textarea = document.getElementById('copy-area');
var contents = [];
contents.push((0, _utils.fullDate)(item.versioncreated));
item.slugline && contents.push(item.slugline);
item.headline && contents.push(item.headline);
item.byline && contents.push((0, _utils.gettext)('By: {{ byline }}', { byline: (0, _lodash.get)(item, 'byline') }));
item.source && contents.push((0, _utils.gettext)('Source: {{ source }}', { source: item.source }));
contents.push('');
if (item.description_text) {
contents.push(item.description_text);
} else if (item.description_html) {
contents.push((0, _utils.getTextFromHtml)(item.description_html));
}
contents.push('');
if (item.body_text) {
contents.push(item.body_text);
} else if (item.body_html) {
contents.push((0, _utils.getTextFromHtml)(item.body_html));
}
textarea.value = contents.join('\n');
textarea.select();
if (document.execCommand('copy')) {
_utils.notify.success((0, _utils.gettext)('Item copied successfully.'));
item && _analytics2.default.itemEvent('copy', item);
} else {
_utils.notify.error((0, _utils.gettext)('Sorry, Copy is not supported.'));
}
if (getState().user) {
_server2.default.post('/wire/' + item._id + '/copy').then(dispatch(setCopyItem(item._id))).catch(errorHandler);
}
};
}
function printItem(item) {
return function (dispatch, getState) {
window.open('/wire/' + item._id + '?print', '_blank');
item && _analytics2.default.itemEvent('print', item);
if (getState().user) {
dispatch(setPrintItem(item._id));
}
};
}
/**
* Search server request
*
* @param {Object} state
* @param {bool} next
* @return {Promise}
*/
function search(state, next) {
var activeFilter = (0, _lodash.get)(state, 'wire.activeFilter', {});
var activeNavigation = (0, _lodash.get)(state, 'wire.activeNavigation');
var createdFilter = (0, _lodash.get)(state, 'wire.createdFilter', {});
var newsOnly = !!state.newsOnly;
var params = {
q: state.query,
bookmarks: state.bookmarks && state.user,
navigation: activeNavigation,
filter: !(0, _lodash.isEmpty)(activeFilter) && JSON.stringify(activeFilter),
from: next ? state.items.length : 0,
created_from: createdFilter.from,
created_to: createdFilter.to,
timezone_offset: (0, _utils.getTimezoneOffset)(),
newsOnly: newsOnly
};
var queryString = Object.keys(params).filter(function (key) {
return params[key];
}).map(function (key) {
return [key, params[key]].join('=');
}).join('&');
return _server2.default.get('/search?' + queryString);
}
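// Illustrative note (not part of the original bundle): with state.query === 'sydney' and an
// active navigation selected, the filter()/map()/join() above yields a request roughly like
//   GET /search?q=sydney&navigation=<navigation id>&timezone_offset=<minutes>
// Parameters with falsy values (empty filter, from === 0, newsOnly === false) are dropped
// because only truthy entries survive the filter() step.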
/**
* Fetch items for current query
*/
function fetchItems() {
return function (dispatch, getState) {
var start = Date.now();
dispatch(queryItems());
return search(getState()).then(function (data) {
return dispatch(recieveItems(data));
}).then(function () {
var state = getState();
(0, _utils.updateRouteParams)({
q: state.query
}, state);
_analytics2.default.timingComplete('search', Date.now() - start);
}).catch(errorHandler);
};
}
function fetchItem(id) {
return function (dispatch) {
return _server2.default.get('/wire/' + id + '?format=json').then(function (data) {
return dispatch(recieveItem(data));
}).catch(errorHandler);
};
}
/**
* Start a follow topic action
*
* @param {String} topic
*/
function followTopic(topic) {
return (0, _actions.renderModal)('followTopic', { topic: topic });
}
function submitFollowTopic(data) {
return function (dispatch, getState) {
var user = getState().user;
var url = '/api/users/' + user + '/topics';
data.timezone_offset = (0, _utils.getTimezoneOffset)();
return _server2.default.post(url, data).then(function (updates) {
return dispatch(addTopic(Object.assign(data, updates)));
}).then(function () {
return dispatch((0, _actions.closeModal)());
}).catch(errorHandler);
};
}
/**
* Start share item action - display modal to pick users
*
* @return {function}
*/
function shareItems(items) {
return function (dispatch, getState) {
var user = getState().user;
var company = getState().company;
return _server2.default.get('/companies/' + company + '/users').then(function (users) {
return users.filter(function (u) {
return u._id !== user;
});
}).then(function (users) {
return dispatch((0, _actions.renderModal)('shareItem', { items: items, users: users }));
}).catch(errorHandler);
};
}
/**
* Submit share item form and close modal if that works
*
* @param {Object} data
*/
function submitShareItem(data) {
return function (dispatch, getState) {
return _server2.default.post('/wire_share', data).then(function () {
if (data.items.length > 1) {
_utils.notify.success((0, _utils.gettext)('Items were shared successfully.'));
} else {
_utils.notify.success((0, _utils.gettext)('Item was shared successfully.'));
}
dispatch((0, _actions.closeModal)());
}).then(function () {
return multiItemEvent('share', data.items, getState());
}).then(function () {
return dispatch(setShareItems(data.items));
}).catch(errorHandler);
};
}
var TOGGLE_SELECTED = exports.TOGGLE_SELECTED = 'TOGGLE_SELECTED';
function toggleSelected(item) {
return { type: TOGGLE_SELECTED, item: item };
}
var SELECT_ALL = exports.SELECT_ALL = 'SELECT_ALL';
function selectAll() {
return { type: SELECT_ALL };
}
var SELECT_NONE = exports.SELECT_NONE = 'SELECT_NONE';
function selectNone() {
return { type: SELECT_NONE };
}
var SHARE_ITEMS = exports.SHARE_ITEMS = 'SHARE_ITEMS';
function setShareItems(items) {
return { type: SHARE_ITEMS, items: items };
}
var DOWNLOAD_ITEMS = exports.DOWNLOAD_ITEMS = 'DOWNLOAD_ITEMS';
function setDownloadItems(items) {
return { type: DOWNLOAD_ITEMS, items: items };
}
var COPY_ITEMS = exports.COPY_ITEMS = 'COPY_ITEMS';
function setCopyItem(item) {
return { type: COPY_ITEMS, items: [item] };
}
var PRINT_ITEMS = exports.PRINT_ITEMS = 'PRINT_ITEMS';
function setPrintItem(item) {
return { type: PRINT_ITEMS, items: [item] };
}
var BOOKMARK_ITEMS = exports.BOOKMARK_ITEMS = 'BOOKMARK_ITEMS';
function setBookmarkItems(items) {
return { type: BOOKMARK_ITEMS, items: items };
}
var REMOVE_BOOKMARK = exports.REMOVE_BOOKMARK = 'REMOVE_BOOKMARK';
function removeBookmarkItems(items) {
return { type: REMOVE_BOOKMARK, items: items };
}
function bookmarkItems(items) {
return function (dispatch, getState) {
return _server2.default.post('/wire_bookmark', { items: items }).then(function () {
if (items.length > 1) {
_utils.notify.success((0, _utils.gettext)('Items were bookmarked successfully.'));
} else {
_utils.notify.success((0, _utils.gettext)('Item was bookmarked successfully.'));
}
}).then(function () {
multiItemEvent('bookmark', items, getState());
}).then(function () {
return dispatch(setBookmarkItems(items));
}).catch(errorHandler);
};
}
function removeBookmarks(items) {
return function (dispatch, getState) {
return _server2.default.del('/wire_bookmark', { items: items }).then(function () {
if (items.length > 1) {
_utils.notify.success((0, _utils.gettext)('Items were removed from bookmarks successfully.'));
} else {
_utils.notify.success((0, _utils.gettext)('Item was removed from bookmarks successfully.'));
}
}).then(function () {
return dispatch(removeBookmarkItems(items));
}).then(function () {
return getState().bookmarks && dispatch(fetchItems());
}).catch(errorHandler);
};
}
function errorHandler(reason) {
console.error('error', reason);
}
/**
* Fetch item versions.
*
* @param {Object} item
* @return {Promise}
*/
function fetchVersions(item) {
return function () {
return _server2.default.get('/wire/' + item._id + '/versions').then(function (data) {
return data._items;
});
};
}
/**
* Download items - display modal to pick a format
*
* @param {Array} items
*/
function downloadItems(items) {
return (0, _actions.renderModal)('downloadItems', { items: items });
}
/**
* Start download - open download view in new window.
*
* @param {Array} items
* @param {String} format
*/
function submitDownloadItems(items, format) {
return function (dispatch, getState) {
window.open('/download/' + items.join(',') + '?format=' + format, '_blank');
dispatch(setDownloadItems(items));
dispatch((0, _actions.closeModal)());
multiItemEvent('download', items, getState());
};
}
var SET_NEW_ITEMS_BY_TOPIC = exports.SET_NEW_ITEMS_BY_TOPIC = 'SET_NEW_ITEMS_BY_TOPIC';
function setNewItemsByTopic(data) {
return { type: SET_NEW_ITEMS_BY_TOPIC, data: data };
}
var REMOVE_NEW_ITEMS = exports.REMOVE_NEW_ITEMS = 'REMOVE_NEW_ITEMS';
function removeNewItems(data) {
return { type: REMOVE_NEW_ITEMS, data: data };
}
/**
* Handle server push notification
*
* @param {Object} data
*/
function pushNotification(push) {
return function (dispatch, getState) {
var user = getState().user;
switch (push.event) {
case 'topic_matches':
return dispatch(setNewItemsByTopic(push.extra));
case 'new_item':
return new Promise(function (resolve, reject) {
dispatch(fetchNewItems()).then(resolve).catch(reject);
});
case 'topics:' + user:
return dispatch(reloadTopics(user));
}
};
}
function reloadTopics(user) {
return function (dispatch) {
return _server2.default.get('/users/' + user + '/topics').then(function (data) {
return dispatch(setTopics(data._items));
}).catch(errorHandler);
};
}
var SET_TOPICS = exports.SET_TOPICS = 'SET_TOPICS';
function setTopics(topics) {
return { type: SET_TOPICS, topics: topics };
}
var SET_NEW_ITEMS = exports.SET_NEW_ITEMS = 'SET_NEW_ITEMS';
function setNewItems(data) {
return { type: SET_NEW_ITEMS, data: data };
}
function fetchNewItems() {
return function (dispatch, getState) {
return search(getState()).then(function (response) {
return dispatch(setNewItems(response));
});
};
}
function fetchNext(item) {
return function () {
if (!item.nextversion) {
return Promise.reject();
}
return _server2.default.get('/wire/' + item.nextversion + '?format=json');
};
}
var TOGGLE_NAVIGATION = exports.TOGGLE_NAVIGATION = 'TOGGLE_NAVIGATION';
function _toggleNavigation(navigation) {
return { type: TOGGLE_NAVIGATION, navigation: navigation };
}
function toggleNavigation(navigation) {
return function (dispatch) {
dispatch(setQuery(''));
dispatch(_toggleNavigation(navigation));
return dispatch(fetchItems());
};
}
var TOGGLE_FILTER = exports.TOGGLE_FILTER = 'TOGGLE_FILTER';
function toggleFilter(key, val, single) {
return function (dispatch) {
setTimeout(function () {
return dispatch({ type: TOGGLE_FILTER, key: key, val: val, single: single });
});
};
}
var START_LOADING = exports.START_LOADING = 'START_LOADING';
function startLoading() {
return { type: START_LOADING };
}
var RECIEVE_NEXT_ITEMS = exports.RECIEVE_NEXT_ITEMS = 'RECIEVE_NEXT_ITEMS';
function recieveNextItems(data) {
return { type: RECIEVE_NEXT_ITEMS, data: data };
}
var MAX_ITEMS = 1000; // server limit
function fetchMoreItems() {
return function (dispatch, getState) {
var state = getState();
var limit = Math.min(MAX_ITEMS, state.totalItems);
if (state.isLoading || state.items.length >= limit) {
return Promise.reject();
}
dispatch(startLoading());
return search(getState(), true).then(function (data) {
return dispatch(recieveNextItems(data));
}).catch(errorHandler);
};
}
/**
* Set state on app init using url params
*
* @param {URLSearchParams} params
*/
function initParams(params) {
return function (dispatch, getState) {
if (params.get('q')) {
dispatch(setQuery(params.get('q')));
}
if (params.get('item')) {
dispatch(fetchItem(params.get('item'))).then(function () {
var item = getState().itemsById[params.get('item')];
dispatch(openItem(item));
});
}
};
}
function _setCreatedFilter(filter) {
return { type: SET_CREATED_FILTER, filter: filter };
}
var SET_CREATED_FILTER = exports.SET_CREATED_FILTER = 'SET_CREATED_FILTER';
function setCreatedFilter(filter) {
return function (dispatch) {
dispatch(_setCreatedFilter(filter));
};
}
function _resetFilter(filter) {
return { type: RESET_FILTER, filter: filter };
}
var RESET_FILTER = exports.RESET_FILTER = 'RESET_FILTER';
function resetFilter(filter) {
return function (dispatch) {
dispatch(_resetFilter(filter));
dispatch(fetchItems());
};
}
/**
* Set query for given topic
*
* @param {Object} topic
* @return {Promise}
*/
function setTopicQuery(topic) {
return function (dispatch) {
dispatch(_toggleNavigation());
dispatch(setQuery(topic.query || ''));
dispatch(_resetFilter(topic.filter));
dispatch(_setCreatedFilter(topic.created));
return dispatch(fetchItems());
};
}
var SET_VIEW = exports.SET_VIEW = 'SET_VIEW';
function setView(view) {
localStorage.setItem('view', view);
return { type: SET_VIEW, view: view };
}
function refresh() {
return function (dispatch, getState) {
return dispatch(recieveItems(getState().newItemsData));
};
}
function multiItemEvent(event, items, state) {
items.forEach(function (itemId) {
var item = state.itemsById[itemId];
item && _analytics2.default.itemEvent(event, item);
});
}
/***/ })
},[610]); | PypiClean |
/Camelot-13.04.13-gpl-pyqt.tar.gz/Camelot-13.04.13-gpl-pyqt/doc/sphinx/source/doc/delegates.rst | .. _doc-delegates:
#############
Delegates
#############
`Delegates` are a cornerstone of the Qt model/delegate/view framework.
A delegate is used to display and edit data from a `model`.
In the Camelot framework, every field of an `Entity` has an associated delegate
that specifies how the field will be displayed and edited. When a new form or
table is constructed, the delegates of all fields on the form or table will
construct `editors` for their fields and fill them with data from the model.
When the data has been edited in the form, the delegates will take care of
updating the model with the new data.
All Camelot delegates are subclasses of :class:`QtGui.QAbstractItemDelegate`.
The `Qt website <http://www.qt-project.org>`_ provides detailed information on the different classes involved in the model/delegate/view framework.
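Behind the scenes this follows the usual Qt pattern: the delegate creates an
editor widget for a cell, fills it with data from the model, and writes the
edited value back into the model afterwards. The sketch below is a generic
PyQt illustration of that pattern, not one of Camelot's own delegates, and it
assumes the PyQt API in which model data is returned as plain Python values::

    from PyQt4 import QtGui
    from PyQt4.QtCore import Qt

    class SpinBoxDelegate( QtGui.QStyledItemDelegate ):
        """Illustrative delegate that edits an integer cell with a spin box"""

        def createEditor( self, parent, option, index ):
            # the view asks the delegate for an editor widget
            editor = QtGui.QSpinBox( parent )
            editor.setRange( 0, 5 )
            return editor

        def setEditorData( self, editor, index ):
            # copy the current value from the model into the editor
            editor.setValue( index.data( Qt.EditRole ) or 0 )

        def setModelData( self, editor, model, index ):
            # write the edited value back into the model
            model.setData( index, editor.value(), Qt.EditRole )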
.. _specifying-delegates:
Specifying delegates
====================
The use of a specific delegate can be forced by using the ``delegate`` field
attribute. Suppose ``rating`` is a field of type :c:type:`integer`; it can then
be forced to be visualized as stars::
from camelot.view.controls import delegates
class Movie( Entity ):
title = Column( Unicode(50) )
rating = Column( Integer )
class Admin( EntityAdmin ):
list_display = ['title', 'rating']
field_attributes = {'rating':{'delegate':delegates.StarDelegate}}
The above code will result in:
.. image:: ../_static/editors/StarEditor_editable.png
If no `delegate` field attribute is given, a default one will be taken
depending on the sqlalchemy field type.
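For example, a hypothetical ``release_date`` field declared with the SQLAlchemy
``Date`` type would, without an explicit ``delegate`` field attribute, be shown
with whatever delegate Camelot registers as the default for that column type
(typically a date editor)::
    class Movie( Entity ):
        title = Column( Unicode(50) )
        release_date = Column( Date )
        class Admin( EntityAdmin ):
            list_display = ['title', 'release_date']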
All available delegates can be found in :mod:`camelot.view.controls.delegates`
| PypiClean |
/CartiMorph_nnUNet-1.7.14.tar.gz/CartiMorph_nnUNet-1.7.14/CartiMorph_nnUNet/preprocessing/cropping.py |
import os
import pickle
import SimpleITK as sitk
import numpy as np
import shutil
from batchgenerators.utilities.file_and_folder_operations import *
from multiprocessing import Pool
from collections import OrderedDict
def create_nonzero_mask(data):
from scipy.ndimage import binary_fill_holes
assert len(data.shape) == 4 or len(data.shape) == 3, "data must have shape (C, X, Y, Z) or shape (C, X, Y)"
nonzero_mask = np.zeros(data.shape[1:], dtype=bool)
for c in range(data.shape[0]):
this_mask = data[c] != 0
nonzero_mask = nonzero_mask | this_mask
nonzero_mask = binary_fill_holes(nonzero_mask)
return nonzero_mask
def get_bbox_from_mask(mask, outside_value=0):
mask_voxel_coords = np.where(mask != outside_value)
minzidx = int(np.min(mask_voxel_coords[0]))
maxzidx = int(np.max(mask_voxel_coords[0])) + 1
minxidx = int(np.min(mask_voxel_coords[1]))
maxxidx = int(np.max(mask_voxel_coords[1])) + 1
minyidx = int(np.min(mask_voxel_coords[2]))
maxyidx = int(np.max(mask_voxel_coords[2])) + 1
return [[minzidx, maxzidx], [minxidx, maxxidx], [minyidx, maxyidx]]
def crop_to_bbox(image, bbox):
assert len(image.shape) == 3, "only supports 3d images"
resizer = (slice(bbox[0][0], bbox[0][1]), slice(bbox[1][0], bbox[1][1]), slice(bbox[2][0], bbox[2][1]))
return image[resizer]
def get_case_identifier(case):
case_identifier = case[0].split("/")[-1].split(".nii.gz")[0][:-5]
return case_identifier
def get_case_identifier_from_npz(case):
case_identifier = case.split("/")[-1][:-4]
return case_identifier
def load_case_from_list_of_files(data_files, seg_file=None):
assert isinstance(data_files, list) or isinstance(data_files, tuple), "case must be either a list or a tuple"
properties = OrderedDict()
data_itk = [sitk.ReadImage(f) for f in data_files]
properties["original_size_of_raw_data"] = np.array(data_itk[0].GetSize())[[2, 1, 0]]
properties["original_spacing"] = np.array(data_itk[0].GetSpacing())[[2, 1, 0]]
properties["list_of_data_files"] = data_files
properties["seg_file"] = seg_file
properties["itk_origin"] = data_itk[0].GetOrigin()
properties["itk_spacing"] = data_itk[0].GetSpacing()
properties["itk_direction"] = data_itk[0].GetDirection()
data_npy = np.vstack([sitk.GetArrayFromImage(d)[None] for d in data_itk])
if seg_file is not None:
seg_itk = sitk.ReadImage(seg_file)
seg_npy = sitk.GetArrayFromImage(seg_itk)[None].astype(np.float32)
else:
seg_npy = None
return data_npy.astype(np.float32), seg_npy, properties
def crop_to_nonzero(data, seg=None, nonzero_label=-1):
"""
:param data:
:param seg:
:param nonzero_label: this will be written into the segmentation map
:return:
"""
nonzero_mask = create_nonzero_mask(data)
bbox = get_bbox_from_mask(nonzero_mask, 0)
cropped_data = []
for c in range(data.shape[0]):
cropped = crop_to_bbox(data[c], bbox)
cropped_data.append(cropped[None])
data = np.vstack(cropped_data)
if seg is not None:
cropped_seg = []
for c in range(seg.shape[0]):
cropped = crop_to_bbox(seg[c], bbox)
cropped_seg.append(cropped[None])
seg = np.vstack(cropped_seg)
nonzero_mask = crop_to_bbox(nonzero_mask, bbox)[None]
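    # Voxels outside the nonzero mask are marked with nonzero_label (-1 by default) in the
    # segmentation, so later preprocessing can tell cropped-away background apart from a real label 0.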
if seg is not None:
seg[(seg == 0) & (nonzero_mask == 0)] = nonzero_label
else:
nonzero_mask = nonzero_mask.astype(int)
nonzero_mask[nonzero_mask == 0] = nonzero_label
nonzero_mask[nonzero_mask > 0] = 0
seg = nonzero_mask
return data, seg, bbox
def get_patient_identifiers_from_cropped_files(folder):
return [i.split("/")[-1][:-4] for i in subfiles(folder, join=True, suffix=".npz")]
class ImageCropper(object):
def __init__(self, num_threads, output_folder=None):
"""
This one finds a mask of nonzero elements (must be nonzero in all modalities) and crops the image to that mask.
In the case of BRaTS and ISLES data this results in a significant reduction in image size
:param num_threads:
        :param output_folder: where to store the cropped data
:param list_of_files:
"""
self.output_folder = output_folder
self.num_threads = num_threads
if self.output_folder is not None:
maybe_mkdir_p(self.output_folder)
@staticmethod
def crop(data, properties, seg=None):
shape_before = data.shape
data, seg, bbox = crop_to_nonzero(data, seg, nonzero_label=-1)
shape_after = data.shape
print("before crop:", shape_before, "after crop:", shape_after, "spacing:",
np.array(properties["original_spacing"]), "\n")
properties["crop_bbox"] = bbox
properties['classes'] = np.unique(seg)
seg[seg < -1] = 0
properties["size_after_cropping"] = data[0].shape
return data, seg, properties
@staticmethod
def crop_from_list_of_files(data_files, seg_file=None):
data, seg, properties = load_case_from_list_of_files(data_files, seg_file)
return ImageCropper.crop(data, properties, seg)
def load_crop_save(self, case, case_identifier, overwrite_existing=False):
try:
print(case_identifier)
if overwrite_existing \
or (not os.path.isfile(os.path.join(self.output_folder, "%s.npz" % case_identifier))
or not os.path.isfile(os.path.join(self.output_folder, "%s.pkl" % case_identifier))):
data, seg, properties = self.crop_from_list_of_files(case[:-1], case[-1])
all_data = np.vstack((data, seg))
np.savez_compressed(os.path.join(self.output_folder, "%s.npz" % case_identifier), data=all_data)
with open(os.path.join(self.output_folder, "%s.pkl" % case_identifier), 'wb') as f:
pickle.dump(properties, f)
except Exception as e:
print("Exception in", case_identifier, ":")
print(e)
raise e
def get_list_of_cropped_files(self):
return subfiles(self.output_folder, join=True, suffix=".npz")
def get_patient_identifiers_from_cropped_files(self):
return [i.split("/")[-1][:-4] for i in self.get_list_of_cropped_files()]
def run_cropping(self, list_of_files, overwrite_existing=False, output_folder=None):
"""
        also copies the ground truth nifti segmentations into the preprocessed folder so that we can use them for evaluation
on the cluster
:param list_of_files: list of list of files [[PATIENTID_TIMESTEP_0000.nii.gz], [PATIENTID_TIMESTEP_0000.nii.gz]]
:param overwrite_existing:
:param output_folder:
:return:
"""
if output_folder is not None:
self.output_folder = output_folder
output_folder_gt = os.path.join(self.output_folder, "gt_segmentations")
maybe_mkdir_p(output_folder_gt)
for j, case in enumerate(list_of_files):
if case[-1] is not None:
shutil.copy(case[-1], output_folder_gt)
list_of_args = []
for j, case in enumerate(list_of_files):
case_identifier = get_case_identifier(case)
list_of_args.append((case, case_identifier, overwrite_existing))
p = Pool(self.num_threads)
p.starmap(self.load_crop_save, list_of_args)
p.close()
p.join()
def load_properties(self, case_identifier):
with open(os.path.join(self.output_folder, "%s.pkl" % case_identifier), 'rb') as f:
properties = pickle.load(f)
return properties
def save_properties(self, case_identifier, properties):
with open(os.path.join(self.output_folder, "%s.pkl" % case_identifier), 'wb') as f:
pickle.dump(properties, f) | PypiClean |
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/types/messages_and_media/video_note.py |
from datetime import datetime
from typing import List
import fipper
from fipper import raw, utils
from fipper import types
from fipper.file_id import FileId, FileType, FileUniqueId, FileUniqueType
from ..object import Object
class VideoNote(Object):
"""A video note.
Parameters:
file_id (``str``):
Identifier for this file, which can be used to download or reuse the file.
file_unique_id (``str``):
Unique identifier for this file, which is supposed to be the same over time and for different accounts.
Can't be used to download or reuse the file.
length (``int``):
Video width and height as defined by sender.
duration (``int``):
Duration of the video in seconds as defined by sender.
mime_type (``str``, *optional*):
MIME type of the file as defined by sender.
file_size (``int``, *optional*):
File size.
date (:py:obj:`~datetime.datetime`, *optional*):
Date the video note was sent.
thumbs (List of :obj:`~fipper.types.Thumbnail`, *optional*):
Video thumbnails.
"""
def __init__(
self,
*,
client: "fipper.Client" = None,
file_id: str,
file_unique_id: str,
length: int,
duration: int,
thumbs: List["types.Thumbnail"] = None,
mime_type: str = None,
file_size: int = None,
date: datetime = None
):
super().__init__(client)
self.file_id = file_id
self.file_unique_id = file_unique_id
self.mime_type = mime_type
self.file_size = file_size
self.date = date
self.length = length
self.duration = duration
self.thumbs = thumbs
@staticmethod
def _parse(
client,
video_note: "raw.types.Document",
video_attributes: "raw.types.DocumentAttributeVideo"
) -> "VideoNote":
return VideoNote(
file_id=FileId(
file_type=FileType.VIDEO_NOTE,
dc_id=video_note.dc_id,
media_id=video_note.id,
access_hash=video_note.access_hash,
file_reference=video_note.file_reference
).encode(),
file_unique_id=FileUniqueId(
file_unique_type=FileUniqueType.DOCUMENT,
media_id=video_note.id
).encode(),
length=video_attributes.w,
duration=video_attributes.duration,
file_size=video_note.size,
mime_type=video_note.mime_type,
date=utils.timestamp_to_datetime(video_note.date),
thumbs=types.Thumbnail._parse(client, video_note),
client=client
) | PypiClean |
/MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/ats/model/job.py | import re # noqa: F401
import sys # noqa: F401
from typing import (
Optional,
Union,
List,
Dict,
)
from MergePythonSDK.shared.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
OpenApiModel,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from MergePythonSDK.shared.exceptions import ApiAttributeError
from MergePythonSDK.shared.model_utils import import_model_by_name
def lazy_import():
from MergePythonSDK.ats.model.job_status_enum import JobStatusEnum
from MergePythonSDK.shared.model.remote_data import RemoteData
from MergePythonSDK.ats.model.url import Url
globals()['JobStatusEnum'] = JobStatusEnum
globals()['RemoteData'] = RemoteData
globals()['Url'] = Url
class Job(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
defined_types = {
'id': (str, none_type,), # noqa: E501
'remote_id': (str, none_type, none_type,), # noqa: E501
'name': (str, none_type, none_type,), # noqa: E501
'description': (str, none_type, none_type,), # noqa: E501
'code': (str, none_type, none_type,), # noqa: E501
'status': (JobStatusEnum, str, none_type,),
'job_posting_urls': ([Url], none_type,), # noqa: E501
'remote_created_at': (datetime, none_type, none_type,), # noqa: E501
'remote_updated_at': (datetime, none_type, none_type,), # noqa: E501
'confidential': (bool, none_type, none_type,), # noqa: E501
'departments': ([str, none_type], none_type,), # noqa: E501
'offices': ([str, none_type], none_type,), # noqa: E501
'hiring_managers': ([str, none_type], none_type,), # noqa: E501
'recruiters': ([str, none_type], none_type,), # noqa: E501
'remote_data': ([RemoteData], none_type, none_type,), # noqa: E501
'remote_was_deleted': (bool, none_type,), # noqa: E501
}
expands_types = {"departments": "Department", "hiring_managers": "RemoteUser", "offices": "Office", "recruiters": "RemoteUser"}
# update types with expands
for key, val in expands_types.items():
if key in defined_types.keys():
expands_model = import_model_by_name(val, "ats")
if len(defined_types[key]) > 0 and isinstance(defined_types[key][0], list):
defined_types[key][0].insert(0, expands_model)
defined_types[key] = (*defined_types[key], expands_model)
return defined_types
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'id', # noqa: E501
'remote_id': 'remote_id', # noqa: E501
'name': 'name', # noqa: E501
'description': 'description', # noqa: E501
'code': 'code', # noqa: E501
'status': 'status', # noqa: E501
'job_posting_urls': 'job_posting_urls', # noqa: E501
'remote_created_at': 'remote_created_at', # noqa: E501
'remote_updated_at': 'remote_updated_at', # noqa: E501
'confidential': 'confidential', # noqa: E501
'departments': 'departments', # noqa: E501
'offices': 'offices', # noqa: E501
'hiring_managers': 'hiring_managers', # noqa: E501
'recruiters': 'recruiters', # noqa: E501
'remote_data': 'remote_data', # noqa: E501
'remote_was_deleted': 'remote_was_deleted', # noqa: E501
}
read_only_vars = {
'id', # noqa: E501
'remote_data', # noqa: E501
'remote_was_deleted', # noqa: E501
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""Job - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): [optional] # noqa: E501
remote_id (str, none_type): The third-party API ID of the matching object.. [optional] # noqa: E501
name (str, none_type): The job's name.. [optional] # noqa: E501
description (str, none_type): The job's description.. [optional] # noqa: E501
code (str, none_type): The job's code. Typically an additional identifier used to reference the particular job that is displayed on the ATS.. [optional] # noqa: E501
status (bool, dict, float, int, list, str, none_type): The job's status.. [optional] # noqa: E501
job_posting_urls ([Url]): [optional] # noqa: E501
remote_created_at (datetime, none_type): When the third party's job was created.. [optional] # noqa: E501
remote_updated_at (datetime, none_type): When the third party's job was updated.. [optional] # noqa: E501
confidential (bool, none_type): Whether the job is confidential.. [optional] # noqa: E501
departments ([str, none_type]): IDs of `Department` objects for this `Job`.. [optional] # noqa: E501
offices ([str, none_type]): IDs of `Office` objects for this `Job`.. [optional] # noqa: E501
hiring_managers ([str, none_type]): IDs of `RemoteUser` objects that serve as hiring managers for this `Job`.. [optional] # noqa: E501
recruiters ([str, none_type]): IDs of `RemoteUser` objects that serve as recruiters for this `Job`.. [optional] # noqa: E501
remote_data ([RemoteData], none_type): [optional] # noqa: E501
remote_was_deleted (bool): Indicates whether or not this object has been deleted by third party webhooks.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.remote_id = kwargs.get("remote_id", None)
self.name = kwargs.get("name", None)
self.description = kwargs.get("description", None)
self.code = kwargs.get("code", None)
self.status = kwargs.get("status", None)
self.job_posting_urls = kwargs.get("job_posting_urls", None)
self.remote_created_at = kwargs.get("remote_created_at", None)
self.remote_updated_at = kwargs.get("remote_updated_at", None)
self.confidential = kwargs.get("confidential", None)
self.departments = kwargs.get("departments", None)
self.offices = kwargs.get("offices", None)
self.hiring_managers = kwargs.get("hiring_managers", None)
self.recruiters = kwargs.get("recruiters", None)
# Read only properties
self._id = kwargs.get("id", str())
self._remote_data = kwargs.get("remote_data", None)
self._remote_was_deleted = kwargs.get("remote_was_deleted", bool())
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""Job - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): [optional] # noqa: E501
remote_id (str, none_type): The third-party API ID of the matching object.. [optional] # noqa: E501
name (str, none_type): The job's name.. [optional] # noqa: E501
description (str, none_type): The job's description.. [optional] # noqa: E501
code (str, none_type): The job's code. Typically an additional identifier used to reference the particular job that is displayed on the ATS.. [optional] # noqa: E501
status (bool, dict, float, int, list, str, none_type): The job's status.. [optional] # noqa: E501
job_posting_urls ([Url]): [optional] # noqa: E501
remote_created_at (datetime, none_type): When the third party's job was created.. [optional] # noqa: E501
remote_updated_at (datetime, none_type): When the third party's job was updated.. [optional] # noqa: E501
confidential (bool, none_type): Whether the job is confidential.. [optional] # noqa: E501
departments ([str, none_type]): IDs of `Department` objects for this `Job`.. [optional] # noqa: E501
offices ([str, none_type]): IDs of `Office` objects for this `Job`.. [optional] # noqa: E501
hiring_managers ([str, none_type]): IDs of `RemoteUser` objects that serve as hiring managers for this `Job`.. [optional] # noqa: E501
recruiters ([str, none_type]): IDs of `RemoteUser` objects that serve as recruiters for this `Job`.. [optional] # noqa: E501
remote_data ([RemoteData], none_type): [optional] # noqa: E501
remote_was_deleted (bool): Indicates whether or not this object has been deleted by third party webhooks.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.remote_id: Union[str, none_type] = kwargs.get("remote_id", None)
self.name: Union[str, none_type] = kwargs.get("name", None)
self.description: Union[str, none_type] = kwargs.get("description", None)
self.code: Union[str, none_type] = kwargs.get("code", None)
self.status: Union[bool, dict, float, int, list, str, none_type] = kwargs.get("status", None)
self.job_posting_urls: Union[List["Url"]] = kwargs.get("job_posting_urls", None)
self.remote_created_at: Union[datetime, none_type] = kwargs.get("remote_created_at", None)
self.remote_updated_at: Union[datetime, none_type] = kwargs.get("remote_updated_at", None)
self.confidential: Union[bool, none_type] = kwargs.get("confidential", None)
self.departments: Union[List[str, none_type]] = kwargs.get("departments", list())
self.offices: Union[List[str, none_type]] = kwargs.get("offices", list())
self.hiring_managers: Union[List[str, none_type]] = kwargs.get("hiring_managers", list())
self.recruiters: Union[List[str, none_type]] = kwargs.get("recruiters", list())
# Read only properties
self._id: Union[str] = kwargs.get("id", str())
self._remote_data: Union[List["RemoteData"]] = kwargs.get("remote_data", None)
self._remote_was_deleted: Union[bool] = kwargs.get("remote_was_deleted", bool())
# Read only property getters
@property
def id(self):
return self._id
@property
def remote_data(self):
return self._remote_data
@property
def remote_was_deleted(self):
return self._remote_was_deleted | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/editor/plugins/EntityPalette.js | define("dojox/editor/plugins/EntityPalette",["dojo","dijit","dojox","dijit/_Widget","dijit/_TemplatedMixin","dijit/_PaletteMixin","dojo/_base/connect","dojo/_base/declare","dojo/i18n","dojo/i18n!dojox/editor/plugins/nls/latinEntities"],function(_1,_2,_3){
_1.experimental("dojox.editor.plugins.EntityPalette");
_1.declare("dojox.editor.plugins.EntityPalette",[_2._Widget,_2._TemplatedMixin,_2._PaletteMixin],{templateString:"<div class=\"dojoxEntityPalette\">\n"+"\t<table>\n"+"\t\t<tbody>\n"+"\t\t\t<tr>\n"+"\t\t\t\t<td>\n"+"\t\t\t\t\t<table class=\"dijitPaletteTable\">\n"+"\t\t\t\t\t\t<tbody dojoAttachPoint=\"gridNode\"></tbody>\n"+"\t\t\t\t </table>\n"+"\t\t\t\t</td>\n"+"\t\t\t</tr>\n"+"\t\t\t<tr>\n"+"\t\t\t\t<td>\n"+"\t\t\t\t\t<table dojoAttachPoint=\"previewPane\" class=\"dojoxEntityPalettePreviewTable\">\n"+"\t\t\t\t\t\t<tbody>\n"+"\t\t\t\t\t\t\t<tr>\n"+"\t\t\t\t\t\t\t\t<th class=\"dojoxEntityPalettePreviewHeader\">Preview</th>\n"+"\t\t\t\t\t\t\t\t<th class=\"dojoxEntityPalettePreviewHeader\" dojoAttachPoint=\"codeHeader\">Code</th>\n"+"\t\t\t\t\t\t\t\t<th class=\"dojoxEntityPalettePreviewHeader\" dojoAttachPoint=\"entityHeader\">Name</th>\n"+"\t\t\t\t\t\t\t\t<th class=\"dojoxEntityPalettePreviewHeader\">Description</th>\n"+"\t\t\t\t\t\t\t</tr>\n"+"\t\t\t\t\t\t\t<tr>\n"+"\t\t\t\t\t\t\t\t<td class=\"dojoxEntityPalettePreviewDetailEntity\" dojoAttachPoint=\"previewNode\"></td>\n"+"\t\t\t\t\t\t\t\t<td class=\"dojoxEntityPalettePreviewDetail\" dojoAttachPoint=\"codeNode\"></td>\n"+"\t\t\t\t\t\t\t\t<td class=\"dojoxEntityPalettePreviewDetail\" dojoAttachPoint=\"entityNode\"></td>\n"+"\t\t\t\t\t\t\t\t<td class=\"dojoxEntityPalettePreviewDetail\" dojoAttachPoint=\"descNode\"></td>\n"+"\t\t\t\t\t\t\t</tr>\n"+"\t\t\t\t\t\t</tbody>\n"+"\t\t\t\t\t</table>\n"+"\t\t\t\t</td>\n"+"\t\t\t</tr>\n"+"\t\t</tbody>\n"+"\t</table>\n"+"</div>",baseClass:"dojoxEntityPalette",showPreview:true,showCode:false,showEntityName:false,palette:"latin",dyeClass:"dojox.editor.plugins.LatinEntity",paletteClass:"editorLatinEntityPalette",cellClass:"dojoxEntityPaletteCell",postMixInProperties:function(){
var _4=_1.i18n.getLocalization("dojox.editor.plugins","latinEntities");
var _5=0;
var _6;
for(_6 in _4){
_5++;
}
var _7=Math.floor(Math.sqrt(_5));
var _8=_7;
var _9=0;
var _a=[];
var _b=[];
for(_6 in _4){
_9++;
_b.push(_6);
if(_9%_8===0){
_a.push(_b);
_b=[];
}
}
if(_b.length>0){
_a.push(_b);
}
this._palette=_a;
},buildRendering:function(){
this.inherited(arguments);
var _c=_1.i18n.getLocalization("dojox.editor.plugins","latinEntities");
this._preparePalette(this._palette,_c);
var _d=_1.query(".dojoxEntityPaletteCell",this.gridNode);
_1.forEach(_d,function(_e){
this.connect(_e,"onmouseenter","_onCellMouseEnter");
},this);
},_onCellMouseEnter:function(e){
this._displayDetails(e.target);
},postCreate:function(){
this.inherited(arguments);
_1.style(this.codeHeader,"display",this.showCode?"":"none");
_1.style(this.codeNode,"display",this.showCode?"":"none");
_1.style(this.entityHeader,"display",this.showEntityName?"":"none");
_1.style(this.entityNode,"display",this.showEntityName?"":"none");
if(!this.showPreview){
_1.style(this.previewNode,"display","none");
}
},_setCurrent:function(_f){
this.inherited(arguments);
if(this.showPreview){
this._displayDetails(_f);
}
},_displayDetails:function(_10){
var dye=this._getDye(_10);
if(dye){
var _11=dye.getValue();
var _12=dye._alias;
this.previewNode.innerHTML=_11;
this.codeNode.innerHTML="&#"+parseInt(_11.charCodeAt(0),10)+";";
this.entityNode.innerHTML="&"+_12+";";
var _13=_1.i18n.getLocalization("dojox.editor.plugins","latinEntities");
this.descNode.innerHTML=_13[_12].replace("\n","<br>");
}else{
this.previewNode.innerHTML="";
this.codeNode.innerHTML="";
this.entityNode.innerHTML="";
this.descNode.innerHTML="";
}
}});
_1.declare("dojox.editor.plugins.LatinEntity",null,{constructor:function(_14){
this._alias=_14;
},getValue:function(){
return "&"+this._alias+";";
},fillCell:function(_15){
_15.innerHTML=this.getValue();
}});
return _3.editor.plugins.EntityPalette;
}); | PypiClean |
/NiMARE-0.2.0rc2.tar.gz/NiMARE-0.2.0rc2/nimare/extract/utils.py | from __future__ import division
import logging
import os
import os.path as op
import numpy as np
import pandas as pd
import requests
from fuzzywuzzy import fuzz
from nimare.utils import _uk_to_us
LGR = logging.getLogger(__name__)
def get_data_dirs(data_dir=None):
"""Return the directories in which NiMARE looks for data.
.. versionadded:: 0.0.2
This is typically useful for the end-user to check where the data is
downloaded and stored.
Parameters
----------
data_dir: :obj:`pathlib.Path` or :obj:`str`, optional
Path of the data directory. Used to force data storage in a specified
location. Default: None
Returns
-------
paths : :obj:`list` of :obj:`str`
Paths of the dataset directories.
Notes
-----
Taken from Nilearn.
This function retrieves the datasets directories using the following
priority :
1. defaults system paths
2. the keyword argument data_dir
3. the global environment variable NIMARE_SHARED_DATA
4. the user environment variable NIMARE_DATA
5. nimare_data in the user home folder
"""
# We build an array of successive paths by priority
# The boolean indicates if it is a pre_dir: in that case, we won't add the
# dataset name to the path.
paths = []
# Check data_dir which force storage in a specific location
if data_dir is not None:
paths.extend(str(data_dir).split(os.pathsep))
# If data_dir has not been specified, then we crawl default locations
if data_dir is None:
global_data = os.getenv("NIMARE_SHARED_DATA")
if global_data is not None:
paths.extend(global_data.split(os.pathsep))
local_data = os.getenv("NIMARE_DATA")
if local_data is not None:
paths.extend(local_data.split(os.pathsep))
paths.append(os.path.expanduser("~/.nimare"))
return paths
def _get_dataset_dir(dataset_name, data_dir=None, default_paths=None):
"""Create if necessary and returns data directory of given dataset.
.. versionadded:: 0.0.2
Parameters
----------
dataset_name : :obj:`str`
The unique name of the dataset.
data_dir : :obj:`pathlib.Path` or :obj:`str`, optional
Path of the data directory. Used to force data storage in a specified
location. Default: None
default_paths : :obj:`list` of :obj:`str`, optional
Default system paths in which the dataset may already have been
installed by a third party software. They will be checked first.
Returns
-------
data_dir : :obj:`str`
Path of the given dataset directory.
Notes
-----
Taken from Nilearn.
This function retrieves the datasets directory (or data directory) using
the following priority :
1. defaults system paths
2. the keyword argument data_dir
3. the global environment variable NIMARE_SHARED_DATA
4. the user environment variable NIMARE_DATA
5. nimare_data in the user home folder
"""
paths = []
# Search possible data-specific system paths
if default_paths is not None:
for default_path in default_paths:
paths.extend([(d, True) for d in str(default_path).split(os.pathsep)])
paths.extend([(d, False) for d in get_data_dirs(data_dir=data_dir)])
LGR.debug(f"Dataset search paths: {paths}")
# Check if the dataset exists somewhere
for path, is_pre_dir in paths:
if not is_pre_dir:
path = os.path.join(path, dataset_name)
if os.path.islink(path):
# Resolve path
path = readlinkabs(path)
if os.path.exists(path) and os.path.isdir(path):
LGR.info(f"Dataset found in {path}\n")
return path
# If not, create a folder in the first writeable directory
errors = []
for path, is_pre_dir in paths:
if not is_pre_dir:
path = os.path.join(path, dataset_name)
if not os.path.exists(path):
try:
os.makedirs(path)
LGR.info(f"Dataset created in {path}")
return path
except Exception as exc:
short_error_message = getattr(exc, "strerror", str(exc))
errors.append(f"\n -{path} ({short_error_message})")
raise OSError(
"NiMARE tried to store the dataset in the following directories, but: " + "".join(errors)
)
def readlinkabs(link):
"""Return an absolute path for the destination of a symlink.
.. versionadded:: 0.0.2
From nilearn.
"""
path = os.readlink(link)
if os.path.isabs(path):
return path
return os.path.join(os.path.dirname(link), path)
def _download_zipped_file(url, filename=None):
"""Download from a URL to a file.
.. versionadded:: 0.0.2
"""
if filename is None:
        data_dir = op.abspath(os.getcwd())
filename = op.join(data_dir, url.split("/")[-1])
# NOTE the stream=True parameter
req = requests.get(url, stream=True)
with open(filename, "wb") as f_obj:
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f_obj.write(chunk)
return filename
def _longify(df):
"""Expand comma-separated lists of aliases in DataFrame into separate rows.
.. versionadded:: 0.0.2
"""
reduced = df[["id", "name", "alias"]]
rows = []
for index, row in reduced.iterrows():
if isinstance(row["alias"], str) and "," in row["alias"]:
aliases = row["alias"].split(", ") + [row["name"]]
else:
aliases = [row["name"]]
for alias in aliases:
rows.append([row["id"], row["name"].lower(), alias.lower()])
out_df = pd.DataFrame(columns=["id", "name", "alias"], data=rows)
out_df = out_df.replace("", np.nan)
return out_df
def _get_ratio(tup):
"""Get fuzzy ratio.
.. versionadded:: 0.0.2
"""
if all(isinstance(t, str) for t in tup):
return fuzz.ratio(tup[0], tup[1])
else:
return 100
def _gen_alt_forms(term):
"""Generate a list of alternate forms for a given term.
.. versionadded:: 0.0.2
"""
if not isinstance(term, str) or len(term) == 0:
return [None]
alt_forms = []
# For one alternate form, put contents of parentheses at beginning of term
if "(" in term:
prefix = term[term.find("(") + 1 : term.find(")")]
        temp_term = term.replace(f"({prefix})", "").replace("  ", " ")
alt_forms.append(temp_term)
alt_forms.append(f"{prefix} {temp_term}")
else:
prefix = ""
# Remove extra spaces
alt_forms = [s.strip() for s in alt_forms]
# Allow plurals
# temp = [s+'s' for s in alt_forms]
# temp += [s+'es' for s in alt_forms]
# alt_forms += temp
# Remove words "task" and/or "paradigm"
alt_forms += [term.replace(" task", "") for term in alt_forms]
alt_forms += [term.replace(" paradigm", "") for term in alt_forms]
# Remove duplicates
alt_forms = list(set(alt_forms))
return alt_forms
def _get_concept_reltype(relationship, direction):
"""Convert two-part relationship info to more parsimonious representation.
.. versionadded:: 0.0.2
The two part representation includes relationship type and direction.
"""
new_rel = None
if relationship == "PARTOF":
if direction == "child":
new_rel = "hasPart"
elif direction == "parent":
new_rel = "isPartOf"
elif relationship == "KINDOF":
if direction == "child":
new_rel = "hasKind"
elif direction == "parent":
new_rel = "isKindOf"
return new_rel
def _expand_df(df):
"""Add alternate forms to DataFrame, then sort DataFrame by alias length and similarity.
.. versionadded:: 0.0.2
Sorting by alias length is done for order of extraction from text. Sorting by similarity to
original name is done in order to select most appropriate term to associate with alias.
"""
df = df.copy()
df["alias"] = df["alias"].apply(_uk_to_us)
new_rows = []
for index, row in df.iterrows():
alias = row["alias"]
alt_forms = _gen_alt_forms(alias)
for alt_form in alt_forms:
temp_row = row.copy()
temp_row["alias"] = alt_form
new_rows.append(temp_row.tolist())
alt_df = pd.DataFrame(columns=df.columns, data=new_rows)
df = pd.concat((df, alt_df), axis=0)
# Sort by name length and similarity of alternate form to preferred term
# For example, "task switching" the concept should take priority over the
# "task switching" version of the "task-switching" task.
df["length"] = df["alias"].str.len()
df["ratio"] = df[["alias", "name"]].apply(_get_ratio, axis=1)
df = df.sort_values(by=["length", "ratio"], ascending=[False, False])
return df | PypiClean |
/Djblets-3.3.tar.gz/Djblets-3.3/djblets/htdocs/static/djblets/js/extensions/views/extensionManagerView.es6.172b5ba315a2.js | (function() {
/**
* An item in the list of registered extensions.
*
* This will contain information on the extension and actions for toggling
* the enabled state, reloading the extension, or configuring the extension.
*/
const ExtensionItem = Djblets.Config.ListItem.extend({
defaults: _.defaults({
extension: null,
}, Djblets.Config.ListItem.prototype.defaults),
/**
* Initialize the item.
*
* This will set up the initial state and then listen for any changes
* to the extension's state (caused by enabling/disabling/reloading the
* extension).
*/
initialize() {
Djblets.Config.ListItem.prototype.initialize.apply(this, arguments);
this._updateActions();
this._updateItemState();
this.listenTo(
this.get('extension'),
'change:loadable change:loadError change:enabled',
() => {
this._updateItemState();
this._updateActions();
});
},
/**
* Update the actions for the extension.
*
* If the extension is disabled, this will add an Enabled action.
*
* If it's enabled, but has a load error, it will add a Reload action.
*
* If it's enabled, it will provide actions for Configure and Database,
* if enabled by the extension, along with a Disable action.
*/
_updateActions() {
const extension = this.get('extension');
const actions = [];
if (!extension.get('loadable')) {
/* Add an action for reloading the extension. */
actions.push({
id: 'reload',
label: _`Reload`,
});
} else if (extension.get('enabled')) {
/*
* Show all the actions for enabled extensions.
*
* Note that the order used is here to ensure visual alignment
* for most-frequently-used options.
*/
const configURL = extension.get('configURL');
const dbURL = extension.get('dbURL');
if (dbURL) {
actions.push({
id: 'database',
label: _`Database`,
url: dbURL,
});
}
if (configURL) {
actions.push({
id: 'configure',
label: _`Configure`,
primary: true,
url: configURL,
});
}
actions.push({
id: 'disable',
label: _`Disable`,
danger: true,
});
} else {
/* Add an action for enabling a disabled extension. */
actions.push({
id: 'enable',
label: _`Enable`,
primary: true,
});
}
this.setActions(actions);
},
/**
* Update the state of this item.
*
* This will set the "error", "enabled", or "disabled" state of the
* item, depending on the corresponding state in the extension.
*/
_updateItemState() {
const extension = this.get('extension');
let itemState;
if (!extension.get('loadable')) {
itemState = 'error';
} else if (extension.get('enabled')) {
itemState = 'enabled';
} else {
itemState = 'disabled';
}
this.set('itemState', itemState);
},
});
/**
* Displays an extension in the Manage Extensions list.
*
* This will show information about the extension, and provide links for
* enabling/disabling the extension, and (depending on the extension's
* capabilities) configuring it or viewing its database.
*/
const ExtensionItemView = Djblets.Config.TableItemView.extend({
className: 'djblets-c-extension-item djblets-c-config-forms-list__item',
actionHandlers: {
'disable': '_onDisableClicked',
'enable': '_onEnableClicked',
'reload': '_onReloadClicked',
},
template: _.template(dedent`
<td class="djblets-c-config-forms-list__item-main">
<div class="djblets-c-extension-item__header">
<h3 class="djblets-c-extension-item__name"><%- name %></h3>
<span class="djblets-c-extension-item__version"><%- version %></span>
<div class="djblets-c-extension-item__author">
<% if (authorURL) { %>
<a href="<%- authorURL %>"><%- author %></a>
<% } else { %>
<%- author %>
<% } %>
</div>
</div>
<p class="djblets-c-extension-item__description">
<%- summary %>
</p>
<% if (!loadable) { %>
<pre class="djblets-c-extension-item__load-error"><%- loadError %></pre>
<% } %>
</td>
<td class="djblets-c-config-forms-list__item-state"></td>
<td></td>
`),
/**
* Return context data for rendering the item's template.
*
* Returns:
* object:
* Context data for the render.
*/
getRenderContext() {
return this.model.get('extension').attributes;
},
/**
* Handle a click on the Disable action.
*
* This will make an asynchronous request to disable the extension.
*
* Returns:
* Promise:
* A promise for the disable request. This will resolve once the
* API has handled the request.
*/
_onDisableClicked() {
return this.model.get('extension').disable()
.catch(error => {
alert(_`Failed to disable the extension: ${error.message}.`);
});
},
/**
* Handle a click on the Enable action.
*
* This will make an asynchronous request to enable the extension.
*
* Returns:
* Promise:
* A promise for the enable request. This will resolve once the
* API has handled the request.
*/
_onEnableClicked() {
return this.model.get('extension').enable()
.catch(error => {
alert(_`Failed to enable the extension: ${error.message}.`);
});
},
/**
* Handle a click on the Reload action.
*
* This will trigger an event on the item that tells the extension
* manager to perform a full reload of all extensions, this one included.
*
* Returns:
* Promise:
* A promise for the enable request. This will never resolve, in
* practice, but is returned to enable the action's spinner until
* the page reloads.
*/
_onReloadClicked() {
return new Promise(() => this.model.trigger('needsReload'));
},
});
/**
* Displays the interface showing all installed extensions.
*
* This loads the list of installed extensions and displays each in a list.
*/
Djblets.ExtensionManagerView = Backbone.View.extend({
events: {
'click .djblets-c-extensions__reload': '_reloadFull',
},
listItemsCollectionType: Djblets.Config.ListItems,
listItemType: ExtensionItem,
listItemViewType: ExtensionItemView,
listViewType: Djblets.Config.TableView,
/**
* Initialize the view.
*/
initialize() {
this.list = new Djblets.Config.List(
{},
{
collection: new this.listItemsCollectionType(
[],
{
model: this.listItemType,
})
});
},
/**
* Render the view.
*
* Returns:
* Djblets.ExtensionManagerView:
* This object, for chaining.
*/
render() {
const model = this.model;
const list = this.list;
this.listView = new this.listViewType({
el: this.$('.djblets-c-config-forms-list'),
model: list,
ItemView: this.listItemViewType,
});
this.listView.render().$el
.removeAttr('aria-busy')
.addClass('-all-items-are-multiline');
this._$listContainer = this.listView.$el.parent();
this.listenTo(model, 'loading', () => list.collection.reset());
this.listenTo(model, 'loaded', this._onLoaded);
model.load();
return this;
},
/**
* Handler for when the list of extensions is loaded.
*
* Renders each extension in the list. If the list is empty, this will
* display that there are no extensions installed.
*/
_onLoaded() {
const items = this.list.collection;
this.model.installedExtensions.each(extension => {
const item = items.add({
extension: extension,
});
this.listenTo(item, 'needsReload', this._reloadFull);
});
},
/**
* Perform a full reload of the list of extensions on the server.
*
* This submits our form, which is set in the template to tell the
* ExtensionManager to do a full reload.
*/
_reloadFull() {
this.el.submit();
},
});
})(); | PypiClean |
/FireWorks-2.0.3.tar.gz/FireWorks-2.0.3/fireworks/utilities/fw_utilities.py | import contextlib
import datetime
import errno
import logging
import multiprocessing
import os
import socket
import string
import sys
import traceback
from logging import Formatter, Logger
from multiprocessing.managers import BaseManager
from typing import Tuple
from fireworks.fw_config import DS_PASSWORD, FW_BLOCK_FORMAT, FW_LOGGING_FORMAT, FWData
__author__ = "Anubhav Jain, Xiaohui Qu"
__copyright__ = "Copyright 2012, The Materials Project"
__maintainer__ = "Anubhav Jain"
__email__ = "[email protected]"
__date__ = "Dec 12, 2012"
PREVIOUS_STREAM_LOGGERS = [] # contains the name of loggers that have already been initialized
PREVIOUS_FILE_LOGGERS = [] # contains the name of file loggers that have already been initialized
DEFAULT_FORMATTER = Formatter(FW_LOGGING_FORMAT)
def get_fw_logger(
name: str,
l_dir: None = None,
file_levels: Tuple[str, str] = ("DEBUG", "ERROR"),
stream_level: str = "DEBUG",
formatter: Formatter = DEFAULT_FORMATTER,
clear_logs: bool = False,
) -> Logger:
"""
Convenience method to return a logger.
Args:
name: name of the logger that sets the groups, e.g. 'group1.set2'
l_dir: the directory to put the log file
file_levels: iterable describing level(s) to log to file(s). default: ('DEBUG', 'ERROR')
stream_level: level to log to standard output. default: 'DEBUG'
formatter: logging format. default: FW_LOGGING_FORMATTER
clear_logs: whether to clear the logger with the same name
"""
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG) # anything debug and above passes through to the handler level
stream_level = stream_level if stream_level else "CRITICAL"
# add handlers for the file_levels
if l_dir:
for lvl in file_levels:
f_name = os.path.join(l_dir, name.replace(".", "_") + "-" + lvl.lower() + ".log")
mode = "w" if clear_logs else "a"
fh = logging.FileHandler(f_name, mode=mode)
fh.setLevel(getattr(logging, lvl))
fh.setFormatter(formatter)
if f_name not in PREVIOUS_FILE_LOGGERS:
logger.addHandler(fh)
PREVIOUS_FILE_LOGGERS.append(f_name)
if (name, stream_level) not in PREVIOUS_STREAM_LOGGERS:
# add stream handler
sh = logging.StreamHandler(stream=sys.stdout)
sh.setLevel(getattr(logging, stream_level))
sh.setFormatter(formatter)
logger.addHandler(sh)
PREVIOUS_STREAM_LOGGERS.append((name, stream_level))
return logger
def log_multi(m_logger, msg, log_lvl="info"):
"""
Args:
m_logger (logger): The logger object
msg (str): a String to log
log_lvl (str): The level to log at
"""
_log_fnc = getattr(m_logger, log_lvl.lower())
if FWData().MULTIPROCESSING:
_log_fnc(f"{msg} : ({multiprocessing.current_process().name})")
else:
_log_fnc(msg)
def log_fancy(m_logger, msgs, log_lvl="info", add_traceback=False):
"""
A wrapper around the logger messages useful for multi-line logs.
Helps to group log messages by adding a fancy border around it,
which enhances readability of log lines meant to be read
as a unit.
Args:
m_logger (logger): The logger object
log_lvl (str): The level to log at
msgs ([str]): a String or iterable of Strings
add_traceback (bool): add traceback text, useful when logging exceptions (default False)
"""
if isinstance(msgs, str):
msgs = [msgs]
_log_fnc = getattr(m_logger, log_lvl.lower())
_log_fnc("----|vvv|----")
_log_fnc("\n".join(msgs))
if add_traceback:
_log_fnc(traceback.format_exc())
_log_fnc("----|^^^|----")
def log_exception(m_logger, msgs):
"""
A shortcut wrapper around log_fancy for exceptions
Args:
m_logger (logger): The logger object
msgs ([str]): String or iterable of Strings, will be joined by newlines
"""
return log_fancy(m_logger, msgs, "error", add_traceback=True)
def create_datestamp_dir(root_dir, l_logger, prefix="block_"):
"""
Internal method to create a new block or launcher directory.
The dir name is based on the time and the FW_BLOCK_FORMAT
Args:
root_dir: directory to create the new dir in
l_logger: the logger to use
prefix: the prefix for the new dir, default="block_"
"""
def get_path():
time_now = datetime.datetime.utcnow().strftime(FW_BLOCK_FORMAT)
block_path = prefix + time_now
return os.path.join(root_dir, block_path)
ctn = 0
max_try = 10
full_path = None
while full_path is None:
full_path = get_path()
if os.path.exists(full_path):
full_path = None
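            # The directory already exists (e.g. another process created it in the same
            # second), so back off for a short random interval and retry with a new timestamp.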
import random
import time
time.sleep(random.random() / 3 + 0.1)
continue
else:
try:
os.mkdir(full_path)
break
except OSError as e:
if ctn > max_try or e.errno != errno.EEXIST:
raise e
ctn += 1
full_path = None
continue
l_logger.info(f"Created new dir {full_path}")
return full_path
_g_ip, _g_host = None, None
def get_my_ip():
global _g_ip
if _g_ip is None:
try:
_g_ip = socket.gethostbyname(socket.gethostname())
except Exception:
_g_ip = "127.0.0.1"
return _g_ip
def get_my_host():
global _g_host
if _g_host is None:
_g_host = socket.gethostname()
return _g_host
def get_slug(m_str):
valid_chars = f"-_.() {string.ascii_letters}{string.digits}"
m_str = "".join(c for c in m_str if c in valid_chars)
return m_str.replace(" ", "_")
class DataServer(BaseManager):
"""
Provide a server that can host shared objects between multiprocessing
Processes (that normally can't share data). For example, a common LaunchPad is
shared between processes and pinging launches is coordinated to limit DB hits.
"""
@classmethod
def setup(cls, launchpad):
"""
Args:
launchpad (LaunchPad)
Returns:
DataServer
"""
DataServer.register("LaunchPad", callable=lambda: launchpad)
m = DataServer(address=("127.0.0.1", 0), authkey=DS_PASSWORD) # random port
m.start()
return m
class NestedClassGetter:
"""
Used to help pickle inner classes, e.g. see Workflow.Links
When called with the containing class as the first argument,
and the name of the nested class as the second argument,
returns an instance of the nested class.
"""
def __call__(self, containing_class, class_name):
nested_class = getattr(containing_class, class_name)
# return an instance of a nested_class. Some more intelligence could be
# applied for class construction if necessary.
# To support for Pickling of Workflow.Links
return nested_class()
def explicit_serialize(o):
module_name = o.__module__
if module_name == "__main__":
import __main__
module_name = os.path.splitext(os.path.basename(__main__.__file__))[0]
o._fw_name = f"{{{{{module_name}.{o.__name__}}}}}"
return o
@contextlib.contextmanager
def redirect_local():
"""
    Temporarily redirect stdout and stderr to FW_job.out and FW_job.error.
"""
try:
old_err = os.dup(sys.stderr.fileno())
old_out = os.dup(sys.stdout.fileno())
new_err = open("FW_job.error", "w")
new_out = open("FW_job.out", "w")
os.dup2(new_err.fileno(), sys.stderr.fileno())
os.dup2(new_out.fileno(), sys.stdout.fileno())
yield
finally:
os.dup2(old_err, sys.stderr.fileno())
os.dup2(old_out, sys.stdout.fileno())
new_err.close()
new_out.close() | PypiClean |
/Abhilash1_optimizers-0.1.tar.gz/Abhilash1_optimizers-0.1/Abhilash1_optimizers/ClassicMomentum.py | import math
import numpy as np
import Abhilash1_optimizers.Activation as Activation
import Abhilash1_optimizers.hyperparameters as hyperparameters
import Abhilash1_optimizers.Moment_Initializer as Moment_Initializer
#Classic Momentum: gradient descent with an accumulated velocity (momentum) term
class Momentum():
def __init__(alpha,b_1,b_2,epsilon,noise_g):
return hyperparameters.hyperparameter.initialise(alpha,b_1,b_2,epsilon,noise_g)
def init(m_t,v_t,t,theta):
return Moment_Initializer.Moment_Initializer.initialize(m_t,v_t,t,theta)
def Momentum_optimizer(data,len_data,max_itr,alpha,b_1,b_2,epsilon,noise_g,act_func,scale):
alpha,b_1,b_2,epsilon,noise_g=Momentum.__init__(alpha,b_1,b_2,epsilon,noise_g)
m_t,v_t,t,theta_0=Momentum.init(0,0,0,0)
final_weight_vector=[]
for i in range(len_data):
theta_0=data[i]
for i in range(max_itr):
t+=1
if(act_func=="softPlus"):
g_t=Activation.Activation.softplus(theta_0)
elif (act_func=="relu"):
g_t=Activation.Activation.relu(theta_0)
elif (act_func=="elu"):
g_t=Activation.Activation.elu(theta_0,alpha)
elif (act_func=="selu"):
g_t=Activation.Activation.selu(scale,theta_0,theta)
elif (act_func=="tanh"):
g_t=Activation.Activation.tanh(theta_0)
elif (act_func=="hardSigmoid"):
g_t=Activation.Activation.hard_sigmoid(theta_0)
elif (act_func=="softSign"):
g_t=Activation.Activation.softsign(theta_0)
elif (act_func=="linear"):
g_t=Activation.Activation.linear(theta_0)
elif (act_func=="exponential"):
g_t=Activation.Activation.exponential(theta_0)
m_t=b_1*m_t + 1.0*g_t
theta_prev=theta_0
alpha_t=(alpha*(m_t))
theta_0=theta_prev-(alpha_t)
print("Intrermediate gradients")
print("==========================================")
print("Previous gradient",theta_prev)
print("Present gradient",theta_0)
print("==========================================")
#if theta_0==theta_prev:
# break;
final_weight_vector.append(theta_0)
return final_weight_vector
    def initialize(data, max_itr, alpha=0.001, b_1=0.9, b_2=0.999, epsilon=1e-8, noise_g=1, act_func="relu", scale=1.0):
        # default hyperparameter values are assumed here; adjust them to match your experiment
        len_data = len(data)
        optimized_weights = Momentum.Momentum_optimizer(data, len_data, max_itr, alpha, b_1, b_2, epsilon, noise_g, act_func, scale)
print("Optimized Weight Vector")
print("=====================================")
for i in range(len(optimized_weights)):
print("=====",optimized_weights[i])
if __name__=='__main__':
print("Verbose")
#t_0=Adagrad_optimizer()
#print("gradient coefficient",t_0)
#solve_grad=poly_func(t_0)
#print("Gradient Value",solve_grad)
sample_data=[1,0.5,0.7,0.1]
Momentum.initialize(sample_data,100) | PypiClean |
/Behaviour-0.1a4.tar.gz/Behaviour-0.1a4/example/machine/readme.txt | Machine Example
---------------
This is the same example as used in "Behaviour-Driven Testing with RSpec" by
Bruce Tate, translated from RSpec (Ruby) to Behaviour (Python). The original
article may be found at http://www.ibm.com/developerworks/web/library/wa-rspec/.
Listing 10 differs as it is impossible to get the specification to fail due to
some appallingly bad design in Python (in my view a client module should *NEVER*
be able to create new data elements -- this is reverting back to the bad old days
of COBOL and FORTRAN).
Ditto for Listing 15.
Note that I renamed the behaviours to get them to work with nose and pinocchio
for listing 23. | PypiClean |
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/trackers/botsort/reid_multibackend.py | import torch.nn as nn
import torch
from pathlib import Path
import numpy as np
from itertools import islice
import torchvision.transforms as transforms
import cv2
import sys
import torchvision.transforms as T
from collections import OrderedDict, namedtuple
import gdown
from os.path import exists as file_exists
from ultralytics.yolo.utils.checks import check_requirements, check_version
from ultralytics.yolo.utils import LOGGER
from trackers.strongsort.deep.reid_model_factory import (show_downloadeable_models, get_model_url, get_model_name,
download_url, load_pretrained_weights)
from trackers.strongsort.deep.models import build_model
def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):
# Check file(s) for acceptable suffix
if file and suffix:
if isinstance(suffix, str):
suffix = [suffix]
for f in file if isinstance(file, (list, tuple)) else [file]:
s = Path(f).suffix.lower() # file suffix
if len(s):
assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}"
class ReIDDetectMultiBackend(nn.Module):
# ReID models MultiBackend class for python inference on various backends
def __init__(self, weights='osnet_x0_25_msmt17.pt', device=torch.device('cpu'), fp16=False):
super().__init__()
w = weights[0] if isinstance(weights, list) else weights
self.pt, self.jit, self.onnx, self.xml, self.engine, self.tflite = self.model_type(w) # get backend
self.fp16 = fp16
self.fp16 &= self.pt or self.jit or self.engine # FP16
# Build transform functions
self.device = device
self.image_size=(256, 128)
self.pixel_mean=[0.485, 0.456, 0.406]
self.pixel_std=[0.229, 0.224, 0.225]
self.transforms = []
self.transforms += [T.Resize(self.image_size)]
self.transforms += [T.ToTensor()]
self.transforms += [T.Normalize(mean=self.pixel_mean, std=self.pixel_std)]
self.preprocess = T.Compose(self.transforms)
self.to_pil = T.ToPILImage()
model_name = get_model_name(w)
if w.suffix == '.pt':
model_url = get_model_url(w)
if not file_exists(w) and model_url is not None:
gdown.download(model_url, str(w), quiet=False)
elif file_exists(w):
pass
else:
                print(f'No URL associated with the chosen StrongSORT weights ({w}). Choose between:')
show_downloadeable_models()
exit()
# Build model
self.model = build_model(
model_name,
num_classes=1,
pretrained=not (w and w.is_file()),
use_gpu=device
)
if self.pt: # PyTorch
# populate model arch with weights
if w and w.is_file() and w.suffix == '.pt':
load_pretrained_weights(self.model, w)
self.model.to(device).eval()
self.model.half() if self.fp16 else self.model.float()
elif self.jit:
LOGGER.info(f'Loading {w} for TorchScript inference...')
self.model = torch.jit.load(w)
self.model.half() if self.fp16 else self.model.float()
elif self.onnx: # ONNX Runtime
LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
cuda = torch.cuda.is_available() and device.type != 'cpu'
#check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
import onnxruntime
providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
self.session = onnxruntime.InferenceSession(str(w), providers=providers)
elif self.engine: # TensorRT
LOGGER.info(f'Loading {w} for TensorRT inference...')
import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download
check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0
if device.type == 'cpu':
device = torch.device('cuda:0')
Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))
logger = trt.Logger(trt.Logger.INFO)
with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
self.model_ = runtime.deserialize_cuda_engine(f.read())
self.context = self.model_.create_execution_context()
self.bindings = OrderedDict()
self.fp16 = False # default updated below
dynamic = False
for index in range(self.model_.num_bindings):
name = self.model_.get_binding_name(index)
dtype = trt.nptype(self.model_.get_binding_dtype(index))
if self.model_.binding_is_input(index):
if -1 in tuple(self.model_.get_binding_shape(index)): # dynamic
dynamic = True
self.context.set_binding_shape(index, tuple(self.model_.get_profile_shape(0, index)[2]))
if dtype == np.float16:
self.fp16 = True
shape = tuple(self.context.get_binding_shape(index))
im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)
self.bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
            self.dynamic = dynamic  # remember whether the engine was built with dynamic input shapes
            self.binding_addrs = OrderedDict((n, d.ptr) for n, d in self.bindings.items())
            batch_size = self.bindings['images'].shape[0]  # if dynamic, this is instead max batch size
elif self.xml: # OpenVINO
LOGGER.info(f'Loading {w} for OpenVINO inference...')
check_requirements(('openvino',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/
from openvino.runtime import Core, Layout, get_batch
ie = Core()
if not Path(w).is_file(): # if not *.xml
w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir
network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
if network.get_parameters()[0].get_layout().empty:
network.get_parameters()[0].set_layout(Layout("NCWH"))
batch_dim = get_batch(network)
if batch_dim.is_static:
batch_size = batch_dim.get_length()
self.executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2
self.output_layer = next(iter(self.executable_network.outputs))
elif self.tflite:
LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
from tflite_runtime.interpreter import Interpreter, load_delegate
except ImportError:
import tensorflow as tf
                Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate
            self.interpreter = Interpreter(model_path=str(w))  # use whichever Interpreter was imported above
self.interpreter.allocate_tensors()
# Get input and output tensors.
self.input_details = self.interpreter.get_input_details()
self.output_details = self.interpreter.get_output_details()
# Test model on random input data.
input_data = np.array(np.random.random_sample((1,256,128,3)), dtype=np.float32)
self.interpreter.set_tensor(self.input_details[0]['index'], input_data)
self.interpreter.invoke()
# The function `get_tensor()` returns a copy of the tensor data.
output_data = self.interpreter.get_tensor(self.output_details[0]['index'])
else:
print('This model framework is not supported yet!')
exit()
@staticmethod
def model_type(p='path/to/model.pt'):
# Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
from trackers.reid_export import export_formats
sf = list(export_formats().Suffix) # export suffixes
check_suffix(p, sf) # checks
types = [s in Path(p).name for s in sf]
return types
def _preprocess(self, im_batch):
images = []
for element in im_batch:
image = self.to_pil(element)
image = self.preprocess(image)
images.append(image)
images = torch.stack(images, dim=0)
images = images.to(self.device)
return images
def forward(self, im_batch):
# preprocess batch
im_batch = self._preprocess(im_batch)
# batch to half
if self.fp16 and im_batch.dtype != torch.float16:
im_batch = im_batch.half()
# batch processing
features = []
if self.pt:
features = self.model(im_batch)
elif self.jit: # TorchScript
features = self.model(im_batch)
elif self.onnx: # ONNX Runtime
im_batch = im_batch.cpu().numpy() # torch to numpy
features = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im_batch})[0]
elif self.engine: # TensorRT
            if self.dynamic and im_batch.shape != self.bindings['images'].shape:
i_in, i_out = (self.model_.get_binding_index(x) for x in ('images', 'output'))
self.context.set_binding_shape(i_in, im_batch.shape) # reshape if dynamic
self.bindings['images'] = self.bindings['images']._replace(shape=im_batch.shape)
self.bindings['output'].data.resize_(tuple(self.context.get_binding_shape(i_out)))
s = self.bindings['images'].shape
assert im_batch.shape == s, f"input size {im_batch.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
self.binding_addrs['images'] = int(im_batch.data_ptr())
self.context.execute_v2(list(self.binding_addrs.values()))
features = self.bindings['output'].data
elif self.xml: # OpenVINO
im_batch = im_batch.cpu().numpy() # FP32
features = self.executable_network([im_batch])[self.output_layer]
else:
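            # NOTE: TFLite weights can be loaded in __init__, but inference for them is not
            # wired up in forward() yet; only pt / jit / onnx / engine / openvino are handled here.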
print('Framework not supported at the moment, we are working on it...')
exit()
if isinstance(features, (list, tuple)):
return self.from_numpy(features[0]) if len(features) == 1 else [self.from_numpy(x) for x in features]
else:
return self.from_numpy(features)
def from_numpy(self, x):
return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x
def warmup(self, imgsz=[(256, 128, 3)]):
# Warmup model by running inference once
warmup_types = self.pt, self.jit, self.onnx, self.engine, self.tflite
if any(warmup_types) and self.device.type != 'cpu':
im = [np.empty(*imgsz).astype(np.uint8)] # input
for _ in range(2 if self.jit else 1): #
self.forward(im) # warmup | PypiClean |
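A minimal usage sketch for the class above (hedged: the checkpoint name, device choice and
crop shape are assumptions, not taken from this file; a .pt name with a known model URL may
trigger a gdown download on first use):

    import numpy as np
    import torch
    from pathlib import Path
    from trackers.botsort.reid_multibackend import ReIDDetectMultiBackend

    # Pass weights as a Path so the .suffix checks in __init__ work as written.
    reid = ReIDDetectMultiBackend(weights=Path('osnet_x0_25_msmt17.pt'),
                                  device=torch.device('cpu'), fp16=False)
    reid.warmup()  # no-op on CPU; runs a dummy forward pass for GPU backends

    # One HxWx3 uint8 crop per detected person; ToPILImage/Resize handle the rest.
    crops = [np.zeros((256, 128, 3), dtype=np.uint8)]
    features = reid(crops)  # nn.Module.__call__ -> forward(); one embedding row per crop
    print(features.shape)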